From f0d0f5283f04f81b2b887f61808500f8490ec39d Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Wed, 21 Apr 2010 11:39:48 -0400 Subject: [PATCH 001/309] Allow override of device asserts, including multi-device support. Set in board file with TARGET_OTA_ASSERT_DEVICE. (cherry-picked from commit 0f452f21fc9323b9d1fe746161761cf40aaa5030) Update for 6.0.1 Change-Id: I3d06bdc0e3e26bde0c0e646accd050364f9713b9 Signed-off-by: Josue Rivera --- core/Makefile | 6 ++++++ tools/releasetools/edify_generator.py | 10 ++++++---- tools/releasetools/ota_from_target_files | 13 +++++++++++-- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/core/Makefile b/core/Makefile index c2cb8ee7e..239e68a44 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1643,6 +1643,12 @@ else $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := false endif +ifeq ($(TARGET_OTA_ASSERT_DEVICE),) + $(INTERNAL_OTA_PACKAGE_TARGET): override_device := auto +else + $(INTERNAL_OTA_PACKAGE_TARGET): override_device := $(TARGET_OTA_ASSERT_DEVICE) +endif + $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) @echo "Package OTA: $@" $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 6b1986496..1d0700c09 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -117,10 +117,12 @@ def AssertOlderBuild(self, timestamp, timestamp_text): def AssertDevice(self, device): """Assert that the device identifier is the given string.""" - cmd = ('get_device_compatible("%s") == "OK" || ' - 'abort("This package is for \\"%s\\" devices; ' - 'this is a \\"" + getprop("ro.product.device") + "\\".");') % ( - device, device) + cmd = ('assert(' + + ' || \0'.join(['get_device_compatible("%s") == "OK" || getprop("ro.build.product") == "%s"' + % (i, i) for i in device.split(",")]) + + ' || abort("This package is for device: %s; ' + + 
'this device is " + getprop("ro.product.device") + ".");' + + ');') % device self.script.append(cmd) def AssertSomeBootloader(self, *bootloaders): diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 02d1f541f..18283fa01 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -93,6 +93,9 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package Enable or disable the execution of backuptool.sh. Disabled by default. + --override_device + Override device-specific asserts. Can be a comma-separated list. + """ import sys @@ -133,6 +136,7 @@ OPTIONS.fallback_to_full = True OPTIONS.full_radio = False OPTIONS.full_bootloader = False OPTIONS.backuptool = False +OPTIONS.override_device = 'auto' def MostPopularKey(d, default): """Given a dict, return the key corresponding to the largest @@ -412,7 +416,10 @@ def SignOutput(temp_zip_name, output_zip_name): def AppendAssertions(script, info_dict, oem_dict=None): oem_props = info_dict.get("oem_fingerprint_properties") if oem_props is None or len(oem_props) == 0: - device = GetBuildProp("ro.product.device", info_dict) + if OPTIONS.override_device == "auto": + device = GetBuildProp("ro.product.device", info_dict) + else: + device = OPTIONS.override_device script.AssertDevice(device) else: if oem_dict is None: @@ -444,7 +451,6 @@ def GetOemProperty(name, oem_props, oem_dict, info_dict): return oem_dict[name] return GetBuildProp(name, info_dict) - def CalculateFingerprint(oem_props, oem_dict, info_dict): if oem_props is None: return GetBuildProp("ro.build.fingerprint", info_dict) @@ -1551,6 +1557,8 @@ def main(argv): OPTIONS.fallback_to_full = False elif o in ("--backup"): OPTIONS.backuptool = bool(a.lower() == 'true') + elif o in ("--override_device"): + OPTIONS.override_device = a else: return False return True @@ -1576,6 +1584,7 @@ def main(argv): "verify", "no_fallback_to_full", "backup=", + "override_device="], 
], extra_option_handler=option_handler) if len(args) != 2: From 58f29ffab4d51af0f549e16102ce34a20ea30f47 Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Sat, 26 Nov 2011 18:51:42 -0800 Subject: [PATCH 002/309] roomservice delivers you lunch combos from the SlimRoms github. fix roomservice formatting support product names with _ in them fix roomservice to handle pagination Change-Id: I4923c2f768094dbad8e06a72d9f27d46414030ab --- envsetup.sh | 9 ++++++++ tools/roomservice.py | 55 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100755 tools/roomservice.py diff --git a/envsetup.sh b/envsetup.sh index a89616f85..4c5991c9c 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -588,6 +588,15 @@ function lunch() local product=$(echo -n $selection | sed -e "s/-.*$//") check_product $product if [ $? -ne 0 ] + then + # if we can't find a product, try to grab it off the SLIM github + T=$(gettop) + pushd $T > /dev/null + build/tools/roomservice.py $product + popd > /dev/null + check_product $product + fi + if [ $? -ne 0 ] then echo echo "** Don't have a product spec for: '$product'" diff --git a/tools/roomservice.py b/tools/roomservice.py new file mode 100755 index 000000000..3ae6be0e7 --- /dev/null +++ b/tools/roomservice.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +import os +import sys +import urllib2 +import json +from xml.etree import ElementTree + +product = sys.argv[1]; +device = product[product.index("_") + 1:] +print "Device %s not found. Attempting to retrieve device repository from SlimRoms Github (http://github.com/SlimRoms)." 
% device + +repositories = [] + +page = 1 +while True: + result = json.loads(urllib2.urlopen("http://github.com/api/v2/json/repos/show/SlimRoms?page=%d" % page).read()) + if len(result['repositories']) == 0: + break + repositories = repositories + result['repositories'] + page = page + 1 + +for repository in repositories: + repo_name = repository['name'] + if repo_name.startswith("android_device_") and repo_name.endswith("_" + device): + print "Found repository: %s" % repository['name'] + manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "") + + try: + lm = ElementTree.parse(".repo/slim_manifest.xml") + lm = lm.getroot() + except: + lm = ElementTree.Element("manifest") + + for child in lm.getchildren(): + if child.attrib['name'].endswith("_" + device): + print "Duplicate device '%s' found in slim_manifest.xml." % child.attrib['name'] + sys.exit() + + repo_path = "device/%s/%s" % (manufacturer, device) + project = ElementTree.Element("project", attrib = { "path": repo_path, "remote": "github", "name": "SlimRoms/%s" % repository['name'] }) + lm.append(project) + + raw_xml = ElementTree.tostring(lm) + raw_xml = '\n' + raw_xml + + f = open('.repo/slim_manifest.xml', 'w') + f.write(raw_xml) + f.close() + + print "Syncing repository to retrieve project." + os.system('repo sync %s' % repo_path) + print "Done!" + sys.exit() + +print "Repository for %s not found in the SlimRoms Github repository list. If this is in error, you may need to manually add it to your slim_manifest.xml." 
% device From a0ec659e002303227247734de7cc53a015a8865b Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Thu, 1 Dec 2011 00:21:02 -0800 Subject: [PATCH 003/309] build: Run squisher for the bacon target Change-Id: If33402a5eb1661345c7d37ec1778742096449c70 TARGET_CUSTOM_RELEASETOOL is a squisher replacement Change-Id: I9c7126d6bbe0303c1d35bb54ec2f0c9187b131e0 Fix tabs, sorry bad commit --amend :p Change-Id: Ia981edb520d7b5f91e0717f6b4d8180386c8a20d --- core/Makefile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/core/Makefile b/core/Makefile index 239e68a44..c0399a613 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1657,16 +1657,19 @@ $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) -p $(HOST_OUT) \ -k $(KEY_CERT_PAIR) \ --backup=$(backuptool) \ + --override_device=$(override_device) \ $(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \ $(BUILT_TARGET_FILES_PACKAGE) $@ -ifneq ($(TARGET_CUSTOM_RELEASETOOL),) - @echo "Running releasetool" - $(TARGET_CUSTOM_RELEASETOOL) -endif .PHONY: otapackage bacon otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) bacon: otapackage +ifneq ($(TARGET_CUSTOM_RELEASETOOL),) + @echo "Running releasetool" + $(hide) $(TARGET_CUSTOM_RELEASETOOL) +else + $(hide) ./vendor/cm/tools/squisher +endif endif # recovery_fstab is defined endif # TARGET_NO_KERNEL != true From f2a5ea08f1f2a304ef7b2820a8339cbada668b2a Mon Sep 17 00:00:00 2001 From: Robert Burns Date: Wed, 4 Jan 2012 20:14:36 -0500 Subject: [PATCH 004/309] Add ro.slim.device to build.prop As requested by Ricardo Cerqueira: http://r.cyanogenmod.com/#change,11423,patchset=1 Change-Id: If08cc13c0f0e92e0dd262a2f71610070a54aaead --- core/Makefile | 1 + tools/buildinfo.sh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/core/Makefile b/core/Makefile index c0399a613..647e95ed1 100644 --- a/core/Makefile +++ b/core/Makefile @@ -193,6 +193,7 @@ endif $(hide) TARGET_BUILD_TYPE="$(TARGET_BUILD_VARIANT)" \ 
TARGET_BUILD_FLAVOR="$(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)" \ TARGET_DEVICE="$(TARGET_DEVICE)" \ + SLIM_DEVICE="$(TARGET_DEVICE)" \ PRODUCT_NAME="$(TARGET_PRODUCT)" \ PRODUCT_BRAND="$(PRODUCT_BRAND)" \ PRODUCT_DEFAULT_LOCALE="$(call get-default-product-locale,$(PRODUCT_LOCALES))" \ diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh index 5c199b809..85d60a33e 100755 --- a/tools/buildinfo.sh +++ b/tools/buildinfo.sh @@ -56,4 +56,6 @@ if [ -n "$BUILD_THUMBPRINT" ] ; then fi echo "ro.build.characteristics=$TARGET_AAPT_CHARACTERISTICS" +echo "ro.slim.device=$SLIM_DEVICE" + echo "# end build properties" From fa60d411f61d211e627a3b2fdf1b3ffd43294c48 Mon Sep 17 00:00:00 2001 From: Tanguy Pruvot Date: Mon, 9 Jan 2012 15:42:26 +0100 Subject: [PATCH 005/309] build in colors: Install outputs in blue/yellow [basic ics version] "target prefix:" in yellow "Install: file" in cyan should be in a single command line else there are sync problems in multithread (-j X) These colors can be tuned in core/Makefile if you use a white terminal. 
can be disabled with "export BUILD_WITH_COLORS=0" Only the most important output is colored to reduce ics merge problems Change-Id: I0e585079fde6900799ef209367a523d94a51cda5 Colors: add more colors to final build steps also fix releasetool echo and visible recovery echos Change-Id: Icf5d88468572f935610c544bf1d5d356ec9870d3 build in colors: host C/C++ Change-Id: Ic415cab53a2efa104c9d4b31ddbe8c8eb74e493d --- core/Makefile | 31 +++++++++++++++++++++++-------- core/base_rules.mk | 4 ++-- core/definitions.mk | 16 ++++++++-------- 3 files changed, 33 insertions(+), 18 deletions(-) diff --git a/core/Makefile b/core/Makefile index 647e95ed1..0fec2fa92 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1,5 +1,16 @@ # Put some miscellaneous rules here +# Build system colors +# +# PFX: Prefix "target C++:" in yellow +# INS: Module "Install:" output color (cyan for ics) + +ifneq ($(BUILD_WITH_COLORS),0) + CL_PFX="\033[33m" + CL_INS="\033[36m" + CL_RST="\033[0m" +endif + # HACK: clear LOCAL_PATH from including last build target before calling # intermedites-dir-for LOCAL_PATH := $(BUILD_SYSTEM) @@ -512,6 +523,7 @@ $(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOT_S $(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@ $(BOOT_SIGNER) /boot $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $@ $(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE)) + @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} .PHONY: bootimage-nodeps bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER) @@ -541,12 +553,14 @@ $(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(call pretty,"Target boot image: $@") $(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@ $(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE)) + @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} .PHONY: 
bootimage-nodeps bootimage-nodeps: $(MKBOOTIMG) @echo "make $@: ignoring dependencies" $(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET) $(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE)) + @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} endif # TARGET_BOOTIMAGE_USE_EXT2 @@ -899,7 +913,7 @@ define build-recoveryimage-target $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)) $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) - @echo ----- Made recovery image: $(1) -------- + @echo -e ${CL_INS}"Made recovery image: $@"${CL_RST} endef $(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \ @@ -1104,7 +1118,7 @@ endif $(INSTALLED_SYSTEMIMAGE): $(BUILT_SYSTEMIMAGE) $(RECOVERY_FROM_BOOT_PATCH) | $(ACP) - @echo "Install system fs image: $@" + @echo -e ${CL_INS}"Install system fs image: $@"${CL_RST} $(copy-file-to-target) $(hide) $(call assert-max-image-size,$@ $(RECOVERY_FROM_BOOT_PATCH),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE)) @@ -1485,7 +1499,7 @@ $(BUILT_TARGET_FILES_PACKAGE): \ $(APKCERTS_FILE) \ $(HOST_OUT_EXECUTABLES)/fs_config \ | $(ACP) - @echo "Package target files: $@" + @echo -e ${CL_PFX}"Package target files:"${CL_RST}" $@" $(hide) rm -rf $@ $(zip_root) $(hide) mkdir -p $(dir $@) $(zip_root) @# Components of the recovery image @@ -1651,7 +1665,7 @@ else endif $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) - @echo "Package OTA: $@" + @echo -e ${CL_PFX}"Package OTA:"${CL_RST}" $@" $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ 
$(OTA_FROM_TARGET_SCRIPT) -v \ --block \ @@ -1666,9 +1680,10 @@ $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) bacon: otapackage ifneq ($(TARGET_CUSTOM_RELEASETOOL),) - @echo "Running releasetool" + @echo "Running custom releasetool..." $(hide) $(TARGET_CUSTOM_RELEASETOOL) else + @echo "Running releasetool..." $(hide) ./vendor/cm/tools/squisher endif @@ -1695,7 +1710,7 @@ else endif $(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) - @echo "Package: $@" + @echo -e ${CL_PFX}"Package:"${CL_RST}" $@" $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ $(IMG_FROM_TARGET_SCRIPT) -v \ -p $(HOST_OUT) \ @@ -1737,7 +1752,7 @@ name := $(name)-apps-$(FILE_NAME_TAG) APPS_ZIP := $(PRODUCT_OUT)/$(name).zip $(APPS_ZIP): $(INSTALLED_SYSTEMIMAGE) - @echo "Package apps: $@" + @echo -e ${CL_PFX}"Package apps:"${CL_RST}" $@" $(hide) rm -rf $@ $(hide) mkdir -p $(dir $@) $(hide) zip -qj $@ $(TARGET_OUT_APPS)/*/*.apk $(TARGET_OUT_APPS_PRIVILEGED)/*/*.apk @@ -1799,7 +1814,7 @@ name := $(TARGET_PRODUCT)-emulator-$(FILE_NAME_TAG) INTERNAL_EMULATOR_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip $(INTERNAL_EMULATOR_PACKAGE_TARGET): $(INTERNAL_EMULATOR_PACKAGE_FILES) - @echo "Package: $@" + @echo -e ${CL_PFX}"Package:"${CL_RST}" $@" $(hide) zip -qj $@ $(INTERNAL_EMULATOR_PACKAGE_FILES) endif diff --git a/core/base_rules.mk b/core/base_rules.mk index ea64cc6a7..ee9916572 100644 --- a/core/base_rules.mk +++ b/core/base_rules.mk @@ -556,12 +556,12 @@ ifndef LOCAL_UNINSTALLABLE_MODULE $(LOCAL_INSTALLED_MODULE): PRIVATE_POST_INSTALL_CMD := $(LOCAL_POST_INSTALL_CMD) ifneq ($(LOCAL_ACP_UNAVAILABLE),true) $(LOCAL_INSTALLED_MODULE): $(LOCAL_BUILT_MODULE) | $(ACP) - @echo "Install: $@" + @echo -e ${CL_INS}"Install: $@"${CL_RST} $(copy-file-to-new-target) $(PRIVATE_POST_INSTALL_CMD) else $(LOCAL_INSTALLED_MODULE): $(LOCAL_BUILT_MODULE) - @echo "Install: $@" + 
@echo -e ${CL_INS}"Install: $@"${CL_RST} $(copy-file-to-target-with-cp) endif diff --git a/core/definitions.mk b/core/definitions.mk index 9dea18c20..7d1f8762d 100644 --- a/core/definitions.mk +++ b/core/definitions.mk @@ -1051,7 +1051,7 @@ endef define transform-cpp-to-o @mkdir -p $(dir $@) -@echo "target $(PRIVATE_ARM_MODE) C++: $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_PFX}"target $(PRIVATE_ARM_MODE) C++:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(hide) $(PRIVATE_CXX) \ $(addprefix -I , $(PRIVATE_C_INCLUDES)) \ $(shell cat $(PRIVATE_IMPORT_INCLUDES)) \ @@ -1103,7 +1103,7 @@ $(hide) $(PRIVATE_CC) \ endef define transform-c-to-o-no-deps -@echo "target $(PRIVATE_ARM_MODE) C: $(PRIVATE_MODULE) <= $<" +@echo ${CL_PFX}"target $(PRIVATE_ARM_MODE) C:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-c-or-s-to-o-no-deps, \ $(PRIVATE_CFLAGS) \ $(PRIVATE_CONLYFLAGS) \ @@ -1112,7 +1112,7 @@ $(call transform-c-or-s-to-o-no-deps, \ endef define transform-s-to-o-no-deps -@echo "target asm: $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_PFX}"target asm:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-c-or-s-to-o-no-deps, $(PRIVATE_ASFLAGS)) endef @@ -1143,7 +1143,7 @@ endef ########################################################### define transform-m-to-o-no-deps -@echo "target ObjC: $(PRIVATE_MODULE) <= $<" +@echo ${CL_PFX}"target ObjC:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-c-or-s-to-o-no-deps, $(PRIVATE_CFLAGS) $(PRIVATE_DEBUG_CFLAGS)) endef @@ -1158,7 +1158,7 @@ endef define transform-host-cpp-to-o @mkdir -p $(dir $@) -@echo "host C++: $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_PFX}"host C++:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(hide) $(PRIVATE_CXX) \ $(addprefix -I , $(PRIVATE_C_INCLUDES)) \ $(shell cat $(PRIVATE_IMPORT_INCLUDES)) \ @@ -1205,7 +1205,7 @@ $(hide) $(PRIVATE_CC) \ endef define transform-host-c-to-o-no-deps -@echo "host C: $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_PFX}"host C:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-host-c-or-s-to-o-no-deps, 
$(PRIVATE_CFLAGS) $(PRIVATE_CONLYFLAGS) $(PRIVATE_DEBUG_CFLAGS)) endef @@ -1832,7 +1832,7 @@ $(if $(PRIVATE_EXTRA_JAR_ARGS),$(call add-java-resources-to,$@)) endef define transform-java-to-classes.jar -@echo "target Java: $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))" +@echo -e ${CL_PFX}"target Java:"${CL_RST}" $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))" $(call compile-java,$(TARGET_JAVAC),$(PRIVATE_BOOTCLASSPATH)) endef @@ -2313,7 +2313,7 @@ endef ########################################################### define transform-generated-source -@echo "target Generated: $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_PFX}"target Generated:"${CL_RST}" $(PRIVATE_MODULE) <= $<" @mkdir -p $(dir $@) $(hide) $(PRIVATE_CUSTOM_TOOL) endef From 266e3a9b299a6595b2441bfd735f0b9548cb0ff9 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Sat, 13 Oct 2012 13:05:29 +0530 Subject: [PATCH 006/309] build-with-colors: moar colors - rename CL_PFX and CL_INS to better match the colors they show. - add more colors. 
Output of: - most host stuff is yellow - most target stuff is green - installing/copying files is cyan - import/export includes and notice files is cyan - bootimage/recoveryimage is cyan - and some more colors in many places ;) Change-Id: I5532afa4ba608e0a7c408516dc9f912f9ca389f7 --- core/Makefile | 39 ++++++++++---------- core/base_rules.mk | 6 ++-- core/binary.mk | 4 +-- core/cleanbuild.mk | 4 +-- core/definitions.mk | 60 +++++++++++++++---------------- core/distdir.mk | 2 +- core/droiddoc.mk | 6 ++-- core/dynamic_binary.mk | 6 ++-- core/host_java_library.mk | 3 +- core/java.mk | 12 +++---- core/java_library.mk | 6 ++-- core/main.mk | 6 ++-- core/notice_files.mk | 2 +- core/pdk_config.mk | 2 +- core/static_java_library.mk | 2 +- core/tasks/apicheck.mk | 4 +-- core/tasks/collect_gpl_sources.mk | 2 +- core/tasks/product-graph.mk | 2 +- core/tasks/sdk-addon.mk | 2 +- tools/apicheck/Android.mk | 2 +- 20 files changed, 86 insertions(+), 86 deletions(-) diff --git a/core/Makefile b/core/Makefile index 0fec2fa92..77d0e0ca7 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1,13 +1,14 @@ # Put some miscellaneous rules here # Build system colors -# -# PFX: Prefix "target C++:" in yellow -# INS: Module "Install:" output color (cyan for ics) ifneq ($(BUILD_WITH_COLORS),0) - CL_PFX="\033[33m" - CL_INS="\033[36m" + CL_RED="\033[31m" + CL_GRN="\033[32m" + CL_YLW="\033[33m" + CL_BLU="\033[34m" + CL_MAG="\033[35m" + CL_CYN="\033[36m" CL_RST="\033[0m" endif @@ -523,7 +524,7 @@ $(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOT_S $(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@ $(BOOT_SIGNER) /boot $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $@ $(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE)) - @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} + @echo -e ${CL_CYN}"Made boot image: $@"${CL_RST} .PHONY: 
bootimage-nodeps bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER) @@ -553,7 +554,7 @@ $(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(call pretty,"Target boot image: $@") $(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@ $(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE)) - @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} + @echo -e ${CL_CYN}"Made boot image: $@"${CL_RST} .PHONY: bootimage-nodeps bootimage-nodeps: $(MKBOOTIMG) @@ -666,7 +667,7 @@ endif # TARGET_BUILD_APPS $(kernel_notice_file): \ prebuilts/qemu-kernel/arm/LINUX_KERNEL_COPYING \ | $(ACP) - @echo Copying: $@ + @echo -e ${CL_CYN}"Copying:"${CL_RST}" $@" $(hide) mkdir -p $(dir $@) $(hide) $(ACP) $< $@ @@ -882,7 +883,7 @@ $(RECOVERY_INSTALL_OTA_KEYS): $(OTA_PUBLIC_KEYS) $(DUMPKEY_JAR) $(extra_keys) RECOVERYIMAGE_ID_FILE := $(PRODUCT_OUT)/recovery.id # $(1): output file define build-recoveryimage-target - @echo ----- Making recovery image ------ + @echo -e ${CL_CYN}"----- Making recovery image ------"${CL_RST} $(hide) mkdir -p $(TARGET_RECOVERY_OUT) $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/tmp @echo Copying baseline ramdisk... 
@@ -913,7 +914,7 @@ define build-recoveryimage-target $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)) $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) - @echo -e ${CL_INS}"Made recovery image: $@"${CL_RST} + @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} endef $(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \ @@ -1111,14 +1112,14 @@ $(RECOVERY_FROM_BOOT_PATCH): $(INSTALLED_RECOVERYIMAGE_TARGET) \ $(INSTALLED_BOOTIMAGE_TARGET) \ $(HOST_OUT_EXECUTABLES)/imgdiff \ $(HOST_OUT_EXECUTABLES)/bsdiff - @echo "Construct recovery from boot" + @echo -e ${CL_CYN}"Construct recovery from boot"${CL_RST} mkdir -p $(dir $@) PATH=$(HOST_OUT_EXECUTABLES):$$PATH $(HOST_OUT_EXECUTABLES)/imgdiff $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_RECOVERYIMAGE_TARGET) $@ endif $(INSTALLED_SYSTEMIMAGE): $(BUILT_SYSTEMIMAGE) $(RECOVERY_FROM_BOOT_PATCH) | $(ACP) - @echo -e ${CL_INS}"Install system fs image: $@"${CL_RST} + @echo -e ${CL_CYN}"Install system fs image: $@"${CL_RST} $(copy-file-to-target) $(hide) $(call assert-max-image-size,$@ $(RECOVERY_FROM_BOOT_PATCH),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE)) @@ -1499,7 +1500,7 @@ $(BUILT_TARGET_FILES_PACKAGE): \ $(APKCERTS_FILE) \ $(HOST_OUT_EXECUTABLES)/fs_config \ | $(ACP) - @echo -e ${CL_PFX}"Package target files:"${CL_RST}" $@" + @echo -e ${CL_YLW}"Package target files:"${CL_RST}" $@" $(hide) rm -rf $@ $(zip_root) $(hide) mkdir -p $(dir $@) $(zip_root) @# Components of the recovery image @@ -1665,7 +1666,7 @@ else endif $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) - @echo -e ${CL_PFX}"Package OTA:"${CL_RST}" $@" + @echo -e ${CL_YLW}"Package OTA:"${CL_RST}" $@" $(hide) 
PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ $(OTA_FROM_TARGET_SCRIPT) -v \ --block \ @@ -1680,10 +1681,10 @@ $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) bacon: otapackage ifneq ($(TARGET_CUSTOM_RELEASETOOL),) - @echo "Running custom releasetool..." + @echo -e ${CL_YLW}"Running custom releasetool..."${CL_RST} $(hide) $(TARGET_CUSTOM_RELEASETOOL) else - @echo "Running releasetool..." + @echo -e ${CL_YLW}"Running releasetool..."${CL_RST} $(hide) ./vendor/cm/tools/squisher endif @@ -1710,7 +1711,7 @@ else endif $(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) - @echo -e ${CL_PFX}"Package:"${CL_RST}" $@" + @echo -e ${CL_YLW}"Package:"${CL_RST}" $@" $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ $(IMG_FROM_TARGET_SCRIPT) -v \ -p $(HOST_OUT) \ @@ -1752,7 +1753,7 @@ name := $(name)-apps-$(FILE_NAME_TAG) APPS_ZIP := $(PRODUCT_OUT)/$(name).zip $(APPS_ZIP): $(INSTALLED_SYSTEMIMAGE) - @echo -e ${CL_PFX}"Package apps:"${CL_RST}" $@" + @echo -e ${CL_YLW}"Package apps:"${CL_RST}" $@" $(hide) rm -rf $@ $(hide) mkdir -p $(dir $@) $(hide) zip -qj $@ $(TARGET_OUT_APPS)/*/*.apk $(TARGET_OUT_APPS_PRIVILEGED)/*/*.apk @@ -1814,7 +1815,7 @@ name := $(TARGET_PRODUCT)-emulator-$(FILE_NAME_TAG) INTERNAL_EMULATOR_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip $(INTERNAL_EMULATOR_PACKAGE_TARGET): $(INTERNAL_EMULATOR_PACKAGE_FILES) - @echo -e ${CL_PFX}"Package:"${CL_RST}" $@" + @echo -e ${CL_YLW}"Package:"${CL_RST}" $@" $(hide) zip -qj $@ $(INTERNAL_EMULATOR_PACKAGE_FILES) endif diff --git a/core/base_rules.mk b/core/base_rules.mk index ee9916572..0f2accc16 100644 --- a/core/base_rules.mk +++ b/core/base_rules.mk @@ -494,7 +494,7 @@ $(cleantarget) : PRIVATE_CLEAN_FILES := \ $(LOCAL_INSTALLED_MODULE) \ $(intermediates) $(cleantarget):: - @echo "Clean: $(PRIVATE_MODULE)" + @echo -e 
${CL_GRN}"Clean:"${CL_RST}" $(PRIVATE_MODULE)" $(hide) rm -rf $(PRIVATE_CLEAN_FILES) ########################################################### @@ -556,12 +556,12 @@ ifndef LOCAL_UNINSTALLABLE_MODULE $(LOCAL_INSTALLED_MODULE): PRIVATE_POST_INSTALL_CMD := $(LOCAL_POST_INSTALL_CMD) ifneq ($(LOCAL_ACP_UNAVAILABLE),true) $(LOCAL_INSTALLED_MODULE): $(LOCAL_BUILT_MODULE) | $(ACP) - @echo -e ${CL_INS}"Install: $@"${CL_RST} + @echo -e ${CL_CYN}"Install: $@"${CL_RST} $(copy-file-to-new-target) $(PRIVATE_POST_INSTALL_CMD) else $(LOCAL_INSTALLED_MODULE): $(LOCAL_BUILT_MODULE) - @echo -e ${CL_INS}"Install: $@"${CL_RST} + @echo -e ${CL_CYN}"Install: $@"${CL_RST} $(copy-file-to-target-with-cp) endif diff --git a/core/binary.mk b/core/binary.mk index b8003d711..649565979 100644 --- a/core/binary.mk +++ b/core/binary.mk @@ -925,7 +925,7 @@ import_includes_deps := $(strip \ $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(LOCAL_IS_HOST_MODULE),,$(LOCAL_2ND_ARCH_VAR_PREFIX))/export_includes)) $(import_includes): PRIVATE_IMPORT_EXPORT_INCLUDES := $(import_includes_deps) $(import_includes) : $(LOCAL_MODULE_MAKEFILE) $(import_includes_deps) - @echo Import includes file: $@ + @echo -e ${CL_CYN}Import includes file:${CL_RST} $@ $(hide) mkdir -p $(dir $@) && rm -f $@ ifdef import_includes_deps $(hide) for f in $(PRIVATE_IMPORT_EXPORT_INCLUDES); do \ @@ -1120,7 +1120,7 @@ export_includes := $(intermediates)/export_includes $(export_includes): PRIVATE_EXPORT_C_INCLUDE_DIRS := $(my_export_c_include_dirs) # Make sure .pb.h are already generated before any dependent source files get compiled. 
$(export_includes) : $(LOCAL_MODULE_MAKEFILE) $(proto_generated_headers) - @echo Export includes file: $< -- $@ + @echo -e ${CL_CYN}Export includes file:${CL_RST} $< -- $@ $(hide) mkdir -p $(dir $@) && rm -f $@ ifdef my_export_c_include_dirs $(hide) for d in $(PRIVATE_EXPORT_C_INCLUDE_DIRS); do \ diff --git a/core/cleanbuild.mk b/core/cleanbuild.mk index 801a2924f..4316ad6ba 100644 --- a/core/cleanbuild.mk +++ b/core/cleanbuild.mk @@ -232,13 +232,13 @@ endif dataclean: FILES := $(dataclean_files) dataclean: $(hide) rm -rf $(FILES) - @echo "Deleted emulator userdata images." + @echo -e ${CL_GRN}"Deleted emulator userdata images."${CL_RST} .PHONY: installclean installclean: FILES := $(installclean_files) installclean: dataclean $(hide) rm -rf $(FILES) - @echo "Deleted images and staging directories." + @echo -e ${CL_GRN}"Deleted images and staging directories."${CL_RST} ifeq "$(force_installclean)" "true" $(info *** Forcing "make installclean"...) diff --git a/core/definitions.mk b/core/definitions.mk index 7d1f8762d..302a2b14b 100644 --- a/core/definitions.mk +++ b/core/definitions.mk @@ -1051,7 +1051,7 @@ endef define transform-cpp-to-o @mkdir -p $(dir $@) -@echo -e ${CL_PFX}"target $(PRIVATE_ARM_MODE) C++:"${CL_RST}" $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_GRN}"target $(PRIVATE_ARM_MODE) C++:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(hide) $(PRIVATE_CXX) \ $(addprefix -I , $(PRIVATE_C_INCLUDES)) \ $(shell cat $(PRIVATE_IMPORT_INCLUDES)) \ @@ -1103,7 +1103,7 @@ $(hide) $(PRIVATE_CC) \ endef define transform-c-to-o-no-deps -@echo ${CL_PFX}"target $(PRIVATE_ARM_MODE) C:"${CL_RST}" $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_GRN}"target $(PRIVATE_ARM_MODE) C:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-c-or-s-to-o-no-deps, \ $(PRIVATE_CFLAGS) \ $(PRIVATE_CONLYFLAGS) \ @@ -1112,7 +1112,7 @@ $(call transform-c-or-s-to-o-no-deps, \ endef define transform-s-to-o-no-deps -@echo -e ${CL_PFX}"target asm:"${CL_RST}" $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_GRN}"target 
asm:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-c-or-s-to-o-no-deps, $(PRIVATE_ASFLAGS)) endef @@ -1143,7 +1143,7 @@ endef ########################################################### define transform-m-to-o-no-deps -@echo ${CL_PFX}"target ObjC:"${CL_RST}" $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_GRN}"target ObjC:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-c-or-s-to-o-no-deps, $(PRIVATE_CFLAGS) $(PRIVATE_DEBUG_CFLAGS)) endef @@ -1158,7 +1158,7 @@ endef define transform-host-cpp-to-o @mkdir -p $(dir $@) -@echo -e ${CL_PFX}"host C++:"${CL_RST}" $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_YLW}"host C++:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(hide) $(PRIVATE_CXX) \ $(addprefix -I , $(PRIVATE_C_INCLUDES)) \ $(shell cat $(PRIVATE_IMPORT_INCLUDES)) \ @@ -1205,12 +1205,12 @@ $(hide) $(PRIVATE_CC) \ endef define transform-host-c-to-o-no-deps -@echo -e ${CL_PFX}"host C:"${CL_RST}" $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_YLW}"host C:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-host-c-or-s-to-o-no-deps, $(PRIVATE_CFLAGS) $(PRIVATE_CONLYFLAGS) $(PRIVATE_DEBUG_CFLAGS)) endef define transform-host-s-to-o-no-deps -@echo "host asm: $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_YLW}"host asm:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-host-c-or-s-to-o-no-deps, $(PRIVATE_ASFLAGS)) endef @@ -1229,7 +1229,7 @@ endef ########################################################### define transform-host-m-to-o-no-deps -@echo "host ObjC: $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_YLW}"host ObjC:"${CL_RST}" $(PRIVATE_MODULE) <= $<" $(call transform-host-c-or-s-to-o-no-deps, $(PRIVATE_CFLAGS) $(PRIVATE_DEBUG_CFLAGS)) endef @@ -1322,7 +1322,7 @@ endef # $(1): the full path of the source static library. 
define _extract-and-include-single-target-whole-static-lib -@echo "preparing StaticLib: $(PRIVATE_MODULE) [including $(strip $(1))]" +@echo -e ${CL_YLW}"preparing StaticLib:"${CL_RST}" $(PRIVATE_MODULE) [including $(1)]" $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\ rm -rf $$ldir; \ mkdir -p $$ldir; \ @@ -1366,7 +1366,7 @@ define transform-o-to-static-lib @mkdir -p $(dir $@) @rm -f $@ $(extract-and-include-target-whole-static-libs) -@echo "target StaticLib: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_GRN}"target StaticLib:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_AR) \ $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_ARFLAGS) \ $(PRIVATE_ARFLAGS) $@,$(PRIVATE_ALL_OBJECTS)) @@ -1378,7 +1378,7 @@ endef # $(1): the full path of the source static library. define _extract-and-include-single-host-whole-static-lib -@echo "preparing StaticLib: $(PRIVATE_MODULE) [including $(strip $(1))]" +@echo -e ${CL_YLW}"preparing StaticLib:"${CL_RST}" $(PRIVATE_MODULE) [including $(1)]" $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\ rm -rf $$ldir; \ mkdir -p $$ldir; \ @@ -1415,7 +1415,7 @@ define transform-host-o-to-static-lib @mkdir -p $(dir $@) @rm -f $@ $(extract-and-include-host-whole-static-libs) -@echo "host StaticLib: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_YLW}"host StaticLib:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_AR) \ $($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_GLOBAL_ARFLAGS) \ $(PRIVATE_ARFLAGS) $@,$(PRIVATE_ALL_OBJECTS)) @@ -1457,13 +1457,13 @@ endif define transform-host-o-to-shared-lib @mkdir -p $(dir $@) -@echo "host SharedLib: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_YLW}"host SharedLib:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(transform-host-o-to-shared-lib-inner) endef define transform-host-o-to-package @mkdir -p $(dir $@) -@echo "host Package: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_YLW}"host 
Package:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(transform-host-o-to-shared-lib-inner) endef @@ -1500,7 +1500,7 @@ endef define transform-o-to-shared-lib @mkdir -p $(dir $@) -@echo "target SharedLib: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_GRN}"target SharedLib:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(transform-o-to-shared-lib-inner) endef @@ -1516,14 +1516,14 @@ endif define transform-to-stripped @mkdir -p $(dir $@) -@echo "target Strip: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_GRN}"target Strip:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(hide) $(PRIVATE_STRIP) --strip-all $< -o $@ \ $(if $(PRIVATE_NO_DEBUGLINK),,$(TARGET_STRIP_EXTRA)) endef define transform-to-stripped-keep-symbols @mkdir -p $(dir $@) -@echo "target Strip (keep symbols): $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_GRN}"target Strip (keep symbols):"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(hide) $(PRIVATE_OBJCOPY) \ `$(PRIVATE_READELF) -S $< | awk '/.debug_/ {print "-R " $$2}' | xargs` \ $(TARGET_STRIP_KEEP_SYMBOLS_EXTRA) $< $@ @@ -1573,7 +1573,7 @@ endef define transform-o-to-executable @mkdir -p $(dir $@) -@echo "target Executable: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_GRN}"target Executable:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(transform-o-to-executable-inner) endef @@ -1617,7 +1617,7 @@ endef define transform-o-to-static-executable @mkdir -p $(dir $@) -@echo "target StaticExecutable: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_GRN}"target StaticExecutable:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(transform-o-to-static-executable-inner) endef @@ -1663,7 +1663,7 @@ endif define transform-host-o-to-executable @mkdir -p $(dir $@) -@echo "host Executable: $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_YLW}"host Executable:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(transform-host-o-to-executable-inner) endef @@ -1832,7 +1832,7 @@ $(if $(PRIVATE_EXTRA_JAR_ARGS),$(call add-java-resources-to,$@)) endef define transform-java-to-classes.jar -@echo -e ${CL_PFX}"target Java:"${CL_RST}" $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))" +@echo -e 
${CL_GRN}"target Java:"${CL_RST}" $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))" $(call compile-java,$(TARGET_JAVAC),$(PRIVATE_BOOTCLASSPATH)) endef @@ -1987,7 +1987,7 @@ endef # only core.jar and framework.jar need a heap this big. # Avoid the memory arguments on Windows, dx fails to load for some reason with them. define transform-classes.jar-to-dex -@echo "target Dex: $(PRIVATE_MODULE)" +@echo -e ${CL_GRN}"target Dex:"${CL_RST}" $(PRIVATE_MODULE)" @mkdir -p $(dir $@) $(hide) rm -f $(dir $@)classes*.dex $(hide) $(DX) \ @@ -2170,7 +2170,7 @@ endef # Note: we intentionally don't clean PRIVATE_CLASS_INTERMEDIATES_DIR # in transform-java-to-classes for the sake of vm-tests. define transform-host-java-to-package -@echo "host Java: $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))" +@echo -e ${CL_YLW}"host Java:"${CL_RST}" $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))" $(call compile-java,$(HOST_JAVAC),$(PRIVATE_BOOTCLASSPATH)) endef @@ -2183,7 +2183,7 @@ endef # $(2): destination header define copy-one-header $(2): $(1) - @echo "Header: $$@" + @echo -e ${CL_YLW}"Header:"${CL_RST}" $$@" $$(copy-file-to-new-target-with-cp) endef @@ -2192,7 +2192,7 @@ endef # $(2): destination file define copy-one-file $(2): $(1) | $(ACP) - @echo "Copy: $$@" + @echo -e ${CL_YLW}"Copy:"${CL_RST}" $$@" $$(copy-file-to-target) endef @@ -2213,7 +2213,7 @@ endef # $(2): destination file, must end with .xml. define copy-xml-file-checked $(2): $(1) | $(ACP) - @echo "Copy xml: $$@" + @echo -e ${CL_YLW}"Copy xml:"${CL_RST}" $$@" $(hide) xmllint $$< >/dev/null # Don't print the xml file to stdout. $$(copy-file-to-target) endef @@ -2271,19 +2271,19 @@ endef # Copy a prebuilt file to a target location. 
define transform-prebuilt-to-target -@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt: $(PRIVATE_MODULE) ($@)" +@echo -e "$(if $(PRIVATE_IS_HOST_MODULE),host,target) "${CL_CYN}"Prebuilt:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target) endef # Copy a prebuilt file to a target location, using zipalign on it. define transform-prebuilt-to-target-with-zipalign -@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt APK: $(PRIVATE_MODULE) ($@)" +@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) "${CL_CYN}"Prebuilt APK:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target-with-zipalign) endef # Copy a prebuilt file to a target location, stripping "# comment" comments. define transform-prebuilt-to-target-strip-comments -@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt: $(PRIVATE_MODULE) ($@)" +@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) "${CL_CYN}"Prebuilt:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target-strip-comments) endef @@ -2313,7 +2313,7 @@ endef ########################################################### define transform-generated-source -@echo -e ${CL_PFX}"target Generated:"${CL_RST}" $(PRIVATE_MODULE) <= $<" +@echo -e ${CL_GRN}"target Generated:"${CL_RST}" $(PRIVATE_MODULE) <= $<" @mkdir -p $(dir $@) $(hide) $(PRIVATE_CUSTOM_TOOL) endef diff --git a/core/distdir.mk b/core/distdir.mk index 51ec46efe..829951e8d 100644 --- a/core/distdir.mk +++ b/core/distdir.mk @@ -37,7 +37,7 @@ ifdef dist_goal define copy-one-dist-file $(3): $(2) $(2): $(1) - @echo "Dist: $$@" + @echo -e ${CL_YLW}"Dist:"${CL_RST}" $$@" $$(copy-file-to-new-target-with-cp) endef diff --git a/core/droiddoc.mk b/core/droiddoc.mk index 41f135c65..04161f9ae 100644 --- a/core/droiddoc.mk +++ b/core/droiddoc.mk @@ -162,7 +162,7 @@ $(full_target): \ $(full_java_lib_deps) \ $(LOCAL_MODULE_MAKEFILE) \ $(LOCAL_ADDITIONAL_DEPENDENCIES) - @echo Docs droiddoc: $(PRIVATE_OUT_DIR) + @echo -e ${CL_YLW}"Docs droiddoc:"${CL_RST}" $(PRIVATE_OUT_DIR)" $(hide) 
mkdir -p $(dir $@) $(call prepare-doc-source-list,$(PRIVATE_SRC_LIST_FILE),$(PRIVATE_JAVA_FILES), \ $(PRIVATE_SOURCE_INTERMEDIATES_DIR) $(PRIVATE_ADDITIONAL_JAVA_DIR)) @@ -197,7 +197,7 @@ else ## ## $(full_target): $(full_src_files) $(full_java_lib_deps) - @echo Docs javadoc: $(PRIVATE_OUT_DIR) + @echo -e ${CL_YLW}"Docs javadoc:"${CL_RST}" $(PRIVATE_OUT_DIR)" @mkdir -p $(dir $@) $(call prepare-doc-source-list,$(PRIVATE_SRC_LIST_FILE),$(PRIVATE_JAVA_FILES), \ $(PRIVATE_SOURCE_INTERMEDIATES_DIR) $(PRIVATE_ADDITIONAL_JAVA_DIR)) @@ -237,7 +237,7 @@ ifeq ($(strip $(LOCAL_UNINSTALLABLE_MODULE)),) out_zip := $(OUT_DOCS)/$(LOCAL_MODULE)-docs.zip $(out_zip): PRIVATE_DOCS_DIR := $(out_dir) $(out_zip): $(full_target) - @echo Package docs: $@ + @echo -e ${CL_YLW}"Package docs:"${CL_RST}" $@" @rm -f $@ @mkdir -p $(dir $@) $(hide) ( F=$$(pwd)/$@ ; cd $(PRIVATE_DOCS_DIR) && zip -rq $$F * ) diff --git a/core/dynamic_binary.mk b/core/dynamic_binary.mk index 38c0cbee8..aabde47e9 100644 --- a/core/dynamic_binary.mk +++ b/core/dynamic_binary.mk @@ -83,7 +83,7 @@ endif symbolic_input := $(relocation_packer_output) symbolic_output := $(my_unstripped_path)/$(my_installed_module_stem) $(symbolic_output) : $(symbolic_input) | $(ACP) - @echo "target Symbolic: $(PRIVATE_MODULE) ($@)" + @echo -e ${CL_GRN}"target Symbolic:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target) @@ -132,11 +132,11 @@ else # use cp(1) instead. 
ifneq ($(LOCAL_ACP_UNAVAILABLE),true) $(strip_output): $(strip_input) | $(ACP) - @echo "target Unstripped: $(PRIVATE_MODULE) ($@)" + @echo -e ${CL_GRN}"target Unstripped:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target) else $(strip_output): $(strip_input) - @echo "target Unstripped: $(PRIVATE_MODULE) ($@)" + @echo -e ${CL_GRN}"target Unstripped:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target-with-cp) endif endif # my_strip_module diff --git a/core/host_java_library.mk b/core/host_java_library.mk index 47189d7d0..2a2fc7de2 100644 --- a/core/host_java_library.mk +++ b/core/host_java_library.mk @@ -61,7 +61,7 @@ $(full_classes_emma_jar) : $(full_classes_compiled_jar) | $(EMMA_JAR) $(transform-classes.jar-to-emma) $(built_javalib_jar) : $(full_classes_emma_jar) - @echo Copying: $@ + @echo -e ${CL_YLW}"Copying:"${CL_RST}" $@" $(hide) $(ACP) -fp $< $@ else # LOCAL_EMMA_INSTRUMENT @@ -70,7 +70,6 @@ full_classes_compiled_jar := $(built_javalib_jar) endif # LOCAL_EMMA_INSTRUMENT $(full_classes_compiled_jar): PRIVATE_JAVAC_DEBUG_FLAGS := -g - # The layers file allows you to enforce a layering between java packages. # Run build/tools/java-layers.py for more details. layers_file := $(addprefix $(LOCAL_PATH)/, $(LOCAL_JAVA_LAYERS_FILE)) diff --git a/core/java.mk b/core/java.mk index bac5ca7d8..3dc8ffcf6 100644 --- a/core/java.mk +++ b/core/java.mk @@ -358,7 +358,7 @@ ifdef full_classes_jar # PRIVATE_ vars to be preserved. 
$(full_classes_stubs_jar): PRIVATE_SOURCE_FILE := $(full_classes_jar) $(full_classes_stubs_jar) : $(full_classes_jar) | $(ACP) - @echo Copying $(PRIVATE_SOURCE_FILE) + @echo -e ${CL_GRN}"Copying"${CL_RST}" $(PRIVATE_SOURCE_FILE)" $(hide) $(ACP) -fp $(PRIVATE_SOURCE_FILE) $@ ALL_MODULES.$(LOCAL_MODULE).STUBS := $(full_classes_stubs_jar) @@ -399,11 +399,11 @@ $(full_classes_compiled_jar): PRIVATE_JAVAC_DEBUG_FLAGS := -g ifneq ($(strip $(LOCAL_JARJAR_RULES)),) $(full_classes_jarjar_jar): PRIVATE_JARJAR_RULES := $(LOCAL_JARJAR_RULES) $(full_classes_jarjar_jar): $(full_classes_compiled_jar) $(LOCAL_JARJAR_RULES) | $(JARJAR) - @echo JarJar: $@ + @echo -e ${CL_GRN}"JarJar:"${CL_RST}" $@" $(hide) java -jar $(JARJAR) process $(PRIVATE_JARJAR_RULES) $< $@ else $(full_classes_jarjar_jar): $(full_classes_compiled_jar) | $(ACP) - @echo Copying: $@ + @echo -e ${CL_GRN}"Copying:"${CL_RST}" $@" $(hide) $(ACP) -fp $< $@ endif @@ -426,13 +426,13 @@ $(full_classes_emma_jar): $(full_classes_jarjar_jar) | $(EMMA_JAR) else $(full_classes_emma_jar): $(full_classes_jarjar_jar) | $(ACP) - @echo Copying: $@ + @echo -e ${CL_GRN}"Copying:"${CL_RST}" $@" $(copy-file-to-target) endif # Keep a copy of the jar just before proguard processing. $(full_classes_jar): $(full_classes_emma_jar) | $(ACP) - @echo Copying: $@ + @echo -e ${CL_GRN}"Copying:"${CL_GRN}" $@" $(hide) $(ACP) -fp $< $@ # Run proguard if necessary, otherwise just copy the file. 
@@ -561,7 +561,7 @@ $(built_dex_intermediate): $(full_classes_proguard_jar) $(DX) endif # LOCAL_JACK_ENABLED is disabled $(built_dex): $(built_dex_intermediate) | $(ACP) - @echo Copying: $@ + @echo -e ${CL_GRN}"Copying:"${CL_RST}" $@" $(hide) mkdir -p $(dir $@) $(hide) rm -f $(dir $@)/classes*.dex $(hide) $(ACP) -fp $(dir $<)/classes*.dex $(dir $@) diff --git a/core/java_library.mk b/core/java_library.mk index 5a2d19bff..a954d870a 100644 --- a/core/java_library.mk +++ b/core/java_library.mk @@ -66,7 +66,7 @@ $(common_javalib.jar) : $(full_classes_proguard_jar) else $(common_javalib.jar) : $(full_classes_jar) endif - @echo "target Static Jar: $(PRIVATE_MODULE) ($@)" + @echo -e ${CL_GRN}"target Static Jar:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target) ifdef LOCAL_JACK_ENABLED @@ -82,7 +82,7 @@ $(common_javalib.jar): PRIVATE_DEX_FILE := $(built_dex) $(common_javalib.jar): PRIVATE_SOURCE_ARCHIVE := $(full_classes_jarjar_jar) $(common_javalib.jar): PRIVATE_DONT_DELETE_JAR_DIRS := $(LOCAL_DONT_DELETE_JAR_DIRS) $(common_javalib.jar) : $(built_dex) $(java_resource_sources) - @echo "target Jar: $(PRIVATE_MODULE) ($@)" + @echo -e ${CL_GRN}"target Jar:"${CL_RST}" $(PRIVATE_MODULE) ($@)" ifdef LOCAL_JACK_ENABLED $(create-empty-package) else @@ -105,7 +105,7 @@ else # ! boot jar $(built_odex): PRIVATE_MODULE := $(LOCAL_MODULE) # Use pattern rule - we may have multiple built odex files. 
$(built_odex) : $(dir $(LOCAL_BUILT_MODULE))% : $(common_javalib.jar) - @echo "Dexpreopt Jar: $(PRIVATE_MODULE) ($@)" + @echo -e ${CL_GRN}"Dexpreopt Jar:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(call dexpreopt-one-file,$<,$@) $(LOCAL_BUILT_MODULE) : $(common_javalib.jar) | $(ACP) diff --git a/core/main.mk b/core/main.mk index 5b6e1e9e7..76fab2a50 100644 --- a/core/main.mk +++ b/core/main.mk @@ -1020,7 +1020,7 @@ $(foreach module,$(sample_MODULES),$(eval $(call \ sample_ADDITIONAL_INSTALLED := \ $(filter-out $(modules_to_install) $(modules_to_check) $(ALL_PREBUILT),$(sample_MODULES)) samplecode: $(sample_APKS_COLLECTION) - @echo "Collect sample code apks: $^" + @echo -e ${CL_GRN}"Collect sample code apks:"${CL_RST}" $^" # remove apks that are not intended to be installed. rm -f $(sample_ADDITIONAL_INSTALLED) endif # samplecode in $(MAKECMDGOALS) @@ -1031,7 +1031,7 @@ findbugs: $(INTERNAL_FINDBUGS_HTML_TARGET) $(INTERNAL_FINDBUGS_XML_TARGET) .PHONY: clean clean: @rm -rf $(OUT_DIR)/* - @echo "Entire build directory removed." 
+ @echo -e ${CL_GRN}"Entire build directory removed."${CL_RST} .PHONY: clobber clobber: clean @@ -1041,7 +1041,7 @@ clobber: clean #xxx scrape this from ALL_MODULE_NAME_TAGS .PHONY: modules modules: - @echo "Available sub-modules:" + @echo -e ${CL_GRN}"Available sub-modules:"${CL_RST} @echo "$(call module-names-for-tag-list,$(ALL_MODULE_TAGS))" | \ tr -s ' ' '\n' | sort -u | $(COLUMN) diff --git a/core/notice_files.mk b/core/notice_files.mk index 43a543527..184d62cda 100644 --- a/core/notice_files.mk +++ b/core/notice_files.mk @@ -62,7 +62,7 @@ installed_notice_file := $($(my_prefix)OUT_NOTICE_FILES)/src/$(module_installed_ $(installed_notice_file): PRIVATE_INSTALLED_MODULE := $(module_installed_filename) $(installed_notice_file): $(notice_file) - @echo Notice file: $< -- $@ + @echo -e ${CL_CYN}Notice file:${CL_RST} $< -- $@ $(hide) mkdir -p $(dir $@) $(hide) cat $< > $@ diff --git a/core/pdk_config.mk b/core/pdk_config.mk index 262b50ecd..148c370cf 100644 --- a/core/pdk_config.mk +++ b/core/pdk_config.mk @@ -116,7 +116,7 @@ endif endif $(_pdk_fusion_stamp) : $(PDK_FUSION_PLATFORM_ZIP) - @echo "Unzip $(dir $@) <- $<" + @echo -e ${CL_YLW}"Unzip"${CL_RST}" $(dir $@) <- $<" $(hide) rm -rf $(dir $@) && mkdir -p $(dir $@) $(hide) unzip -qo $< -d $(dir $@) $(call split-long-arguments,-touch,$(_pdk_fusion_files)) diff --git a/core/static_java_library.mk b/core/static_java_library.mk index 9b7b46a4a..764ab89ea 100644 --- a/core/static_java_library.mk +++ b/core/static_java_library.mk @@ -122,7 +122,7 @@ $(R_file_stamp): PRIVATE_MANIFEST_PACKAGE_NAME := $(R_file_stamp): PRIVATE_MANIFEST_INSTRUMENTATION_FOR := $(R_file_stamp) : $(all_resources) $(full_android_manifest) $(AAPT) $(framework_res_package_export_deps) - @echo "target R.java/Manifest.java: $(PRIVATE_MODULE) ($@)" + @echo -e ${CL_YLW}"target R.java/Manifest.java:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(create-resource-java-files) $(hide) find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name R.java | xargs cat > $@ diff --git 
a/core/tasks/apicheck.mk b/core/tasks/apicheck.mk index 683a075ff..f109527cb 100644 --- a/core/tasks/apicheck.mk +++ b/core/tasks/apicheck.mk @@ -76,9 +76,9 @@ $(eval $(call check-api, \ .PHONY: update-public-api update-public-api: $(INTERNAL_PLATFORM_API_FILE) | $(ACP) - @echo Copying current.txt + @echo -e ${CL_GRN}"Copying current.txt"${CL_RST} $(hide) $(ACP) $(INTERNAL_PLATFORM_API_FILE) frameworks/base/api/current.txt - @echo Copying removed.txt + @echo -e ${CL_GRN}"Copying removed.txt"${CL_RST} $(hide) $(ACP) $(INTERNAL_PLATFORM_REMOVED_API_FILE) frameworks/base/api/removed.txt update-api : update-public-api diff --git a/core/tasks/collect_gpl_sources.mk b/core/tasks/collect_gpl_sources.mk index 30ba62b60..fc03f48b7 100644 --- a/core/tasks/collect_gpl_sources.mk +++ b/core/tasks/collect_gpl_sources.mk @@ -17,7 +17,7 @@ gpl_source_tgz := $(call intermediates-dir-for,PACKAGING,gpl_source,HOST,COMMON) # FORCE since we can't know whether any of the sources changed $(gpl_source_tgz): PRIVATE_PATHS := $(sort $(patsubst %/, %, $(dir $(ALL_GPL_MODULE_LICENSE_FILES)))) $(gpl_source_tgz) : $(ALL_GPL_MODULE_LICENSE_FILES) FORCE - @echo Package gpl sources: $@ + @echo -e ${CL_GRN}"Package gpl sources:"${CL_RST}" $@" @rm -rf $(dir $@) && mkdir -p $(dir $@) $(hide) tar cfz $@ --exclude ".git*" $(PRIVATE_PATHS) diff --git a/core/tasks/product-graph.mk b/core/tasks/product-graph.mk index db2cf7137..9641f3f31 100644 --- a/core/tasks/product-graph.mk +++ b/core/tasks/product-graph.mk @@ -134,7 +134,7 @@ $(foreach p,$(really_all_products), \ ) $(products_pdf): $(products_graph) - @echo Product graph PDF: $@ + @echo -e ${CL_GRN}"Product graph PDF:"${CL_RST}" $@" dot -Tpdf -Nshape=box -o $@ $< $(products_svg): $(products_graph) $(product_debug_files) diff --git a/core/tasks/sdk-addon.mk b/core/tasks/sdk-addon.mk index 5ac9b7d47..620c50cd0 100644 --- a/core/tasks/sdk-addon.mk +++ b/core/tasks/sdk-addon.mk @@ -105,7 +105,7 @@ $(full_target): PRIVATE_DOCS_DIRS := $(addprefix 
$(OUT_DOCS)/, $(doc_modules)) $(full_target): PRIVATE_STAGING_DIR := $(call append-path,$(staging),$(addon_dir_leaf)) $(full_target): $(sdk_addon_deps) | $(ACP) - @echo Packaging SDK Addon: $@ + @echo -e ${CL_GRN}"Packaging SDK Addon:"${CL_RST}" $@" $(hide) mkdir -p $(PRIVATE_STAGING_DIR)/docs $(hide) for d in $(PRIVATE_DOCS_DIRS); do \ $(ACP) -r $$d $(PRIVATE_STAGING_DIR)/docs ;\ diff --git a/tools/apicheck/Android.mk b/tools/apicheck/Android.mk index 1674a17e9..1505c8d7b 100644 --- a/tools/apicheck/Android.mk +++ b/tools/apicheck/Android.mk @@ -32,7 +32,7 @@ include $(BUILD_SYSTEM)/base_rules.mk $(LOCAL_BUILT_MODULE): $(HOST_OUT_JAVA_LIBRARIES)/doclava$(COMMON_JAVA_PACKAGE_SUFFIX) $(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/etc/apicheck | $(ACP) - @echo "Copy: $(PRIVATE_MODULE) ($@)" + @echo -e ${CL_CYN}"Copy:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-new-target) $(hide) chmod 755 $@ From 596066e440fce57680b76bb6a65da9a6b5abb206 Mon Sep 17 00:00:00 2001 From: Diogo Ferreira Date: Sun, 18 Mar 2012 21:18:29 +0000 Subject: [PATCH 007/309] roomservice: Add lightweight dependencies to repositories Roomservice can already fetch your slim_ without the need for a manifest entry. However, when working with common repositories, there is no way of actually fetching them without adding to the manifest. This patch introduces a lightweight dependency system. Each repository can have a slim.dependencies in the following json format: [ { "repository": "repository_name_on_cm_organization" "target_path": "target/path" }, ... ] For instance, for cm_anzu I need android_device_semc_msm7x30-common and android_device_semc_mogami-common. 
I would add both to slim.dependencies as follows: [ { "repository": "android_device_semc_msm7x30-common", "target_path": "device/semc/msm7x30-common" }, { "repository": "android_device_semc_mogami-common", "target_path": "device/semc/mogami-common" } ] Roomservice would then fetch the anzu repository, parse the dependency files and add/fetch/sync these additional repositories if they don't exist already. This also adds pretty printing to the output xml. Change-Id: I9cc847adfc717a06439bc6094213ed6492343158 roomservice: Add branch support to slim.dependencies Allow the slim.dependencies entries to provide an optional "branch" for the repository dependencies. Added to fully support http://wiki.cyanogenmod.com/wiki/Integrated_kernel_building Change-Id: I35b51920d296afa329411af6172c7bd9aeef4af8 roomservice: Fill in dependencies for already-deployed repositories Change-Id: I01fd408c9c4bfa78097c7f848b2556d2b2b180f3 roomservice: Extend dependency-checks to devices in main manifest CM currently keeps devices inherited from AOSP in the main manifest, so take that into account as well when checking device paths Change-Id: I9663f283617f237428b4eaa0cd60b5de2b86a7b9 make compatible with github v3 api Change-Id: Iff6f1f9099cdc5d2b49e04000b5fe3d04aa5d7e4 Fixed build for full-eng Previously Traceback (most recent call last): File "build/tools/roomservice.py", line 153, in repo_path = get_from_manifest(device) NameError: name 'device' is not defined ** Don't have a product spec for: 'full' ** Do you have the right repo manifest? 
Now ============================================ PLATFORM_VERSION_CODENAME=REL PLATFORM_VERSION=4.0.4 TARGET_PRODUCT=full TARGET_BUILD_VARIANT=eng TARGET_BUILD_TYPE=release TARGET_BUILD_APPS= TARGET_ARCH=arm TARGET_ARCH_VARIANT=armv7-a HOST_ARCH=x86 HOST_OS=linux HOST_BUILD_TYPE=release BUILD_ID=IMM76L ============================================ Change-Id: Ib513705aba9a7a52a971ab64102ecbe9fddfb97a roomservice: Bump github request per_page to 100 Change the number of repos per page from the default 30 to 100. We seem to be hitting the rate limit on the jenkins server. Change-Id: Ie733feaa0414cbfebb7efcfc1e24d94e1e466d1b roomservice: Add support for netrc Change-Id: I1f5e11e40125abd0c4e4c8d8294d4fc09bfdc30a roomservice: Handle missing netrc file Change-Id: If981fe79dc3e2191434301239b0cd585be8b4730 roomservice.py: Verbose error when a branch is missing. Also add ROOMSERVICE_BRANCHES environment variable to use fallback branches. Change-Id: I3c2b1d79fc185c1f1e1d658e5ca4f78e688780e2 roomservice.py: Fixups around fallback branches not being used by dependencies when ROOMSERVICE_BRANCHES is defined. Change-Id: Ifb42a023cae5f62ac8f9cf7832125b91b431169c roomservice: Allow following up tag references This is now needed for release builds Change-Id: I8c5f87341059b3b15ee853312b71df73790ad0d8 build: local_manifest.xml deprecated, use local_manifests Patch Set 2:- Use roomservice.xml instead of slim.xml. Change-Id: I3d8a6ef3907b92808662cbba912cea5ed38d0bde Fix fallback branch search in roomservice If you provided a fallback branch to roomservice via the ROOMSERVICE_BRANCHES environment var the branch search would fail if the device repo had any tags. 
Fixed this by appending the tag search results to the branch search results instead of overwriting them Change-Id: I73a11af1500bd04e346f08ec3f83454502f3a169 roomservice: Fix wrong assumption about path of active manifest Change-Id: Id740ff4b848e6ccbfd658be4846197b8ca519237 roomservice: When validating the presence of a repo, check main manifest too Change-Id: If680536484074b473458723d93e783d074d7f669 roomservice: Bump devices per page to 200 Limit was reached again when attempting to lunch various projects, such as steelhead. Change-Id: I2f3b9705e07e1e47b86857aeb383cf7c99fcdbdc Signed-off-by: William Roberts roomservice: Fix assumptions about the branch naming We can't just split from the last slash anymore, since we're using those to distinguish the stabilization branches Change-Id: Ia175dd317f508e99b275b56e9c83bd4729a75ddb roomservice: Add recursive dependencies Dependency repositories can now have dependencies themselves Change-Id: I33a28709170da52bc98f4a62387927e3a11b2450 --- envsetup.sh | 2 + tools/roomservice.py | 299 +++++++++++++++++++++++++++++++++++++------ 2 files changed, 262 insertions(+), 39 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 4c5991c9c..ee9f5d732 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -595,6 +595,8 @@ function lunch() build/tools/roomservice.py $product popd > /dev/null check_product $product + else + build/tools/roomservice.py $product true fi if [ $? -ne 0 ] then diff --git a/tools/roomservice.py b/tools/roomservice.py index 3ae6be0e7..b8b505f83 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -1,55 +1,276 @@ #!/usr/bin/env python +# Copyright (C) 2012-2013, The SlimRoms Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import sys import urllib2 import json +import re +import netrc, base64 from xml.etree import ElementTree product = sys.argv[1]; -device = product[product.index("_") + 1:] -print "Device %s not found. Attempting to retrieve device repository from SlimRoms Github (http://github.com/SlimRoms)." % device + +if len(sys.argv) > 2: + depsonly = sys.argv[2] +else: + depsonly = None + +try: + device = product[product.index("_") + 1:] +except: + device = product + +if not depsonly: + print "Device %s not found. Attempting to retrieve device repository from SlimRoms Github (http://github.com/SlimRoms)." 
% device repositories = [] +try: + authtuple = netrc.netrc().authenticators("api.github.com") + + if authtuple: + githubauth = base64.encodestring('%s:%s' % (authtuple[0], authtuple[2])).replace('\n', '') + else: + githubauth = None +except: + githubauth = None + +def add_auth(githubreq): + if githubauth: + githubreq.add_header("Authorization","Basic %s" % githubauth) + page = 1 -while True: - result = json.loads(urllib2.urlopen("http://github.com/api/v2/json/repos/show/SlimRoms?page=%d" % page).read()) - if len(result['repositories']) == 0: +while not depsonly: + githubreq = urllib2.Request("https://api.github.com/users/SlimRoms/repos?per_page=200&page=%d" % page) + add_auth(githubreq) + result = json.loads(urllib2.urlopen(githubreq).read()) + if len(result) == 0: break - repositories = repositories + result['repositories'] + for res in result: + repositories.append(res) page = page + 1 -for repository in repositories: - repo_name = repository['name'] - if repo_name.startswith("android_device_") and repo_name.endswith("_" + device): - print "Found repository: %s" % repository['name'] - manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "") - - try: - lm = ElementTree.parse(".repo/slim_manifest.xml") - lm = lm.getroot() - except: - lm = ElementTree.Element("manifest") - - for child in lm.getchildren(): - if child.attrib['name'].endswith("_" + device): - print "Duplicate device '%s' found in slim_manifest.xml." 
% child.attrib['name'] - sys.exit() - - repo_path = "device/%s/%s" % (manufacturer, device) - project = ElementTree.Element("project", attrib = { "path": repo_path, "remote": "github", "name": "SlimRoms/%s" % repository['name'] }) +local_manifests = r'.repo/local_manifests' +if not os.path.exists(local_manifests): os.makedirs(local_manifests) + +def exists_in_tree(lm, repository): + for child in lm.getchildren(): + if child.attrib['name'].endswith(repository): + return True + return False + +# in-place prettyprint formatter +def indent(elem, level=0): + i = "\n" + level*" " + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + if not elem.tail or not elem.tail.strip(): + elem.tail = i + for elem in elem: + indent(elem, level+1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + +def get_default_revision(): + m = ElementTree.parse(".repo/manifest.xml") + d = m.findall('default')[0] + r = d.get('revision') + return r.replace('refs/heads/', '').replace('refs/tags/', '') + +def get_from_manifest(devicename): + try: + lm = ElementTree.parse(".repo/local_manifests/slim_manifest.xml") + lm = lm.getroot() + except: + lm = ElementTree.Element("manifest") + + for localpath in lm.findall("project"): + if re.search("android_device_.*_%s$" % device, localpath.get("name")): + return localpath.get("path") + + # Devices originally from AOSP are in the main manifest... 
+ try: + mm = ElementTree.parse(".repo/manifest.xml") + mm = mm.getroot() + except: + mm = ElementTree.Element("manifest") + + for localpath in mm.findall("project"): + if re.search("android_device_.*_%s$" % device, localpath.get("name")): + return localpath.get("path") + + return None + +def is_in_manifest(projectname): + try: + lm = ElementTree.parse(".repo/local_manifests/slim_manifest.xml") + lm = lm.getroot() + except: + lm = ElementTree.Element("manifest") + + for localpath in lm.findall("project"): + if localpath.get("name") == projectname: + return 1 + + ## Search in main manifest, too + try: + lm = ElementTree.parse(".repo/manifest.xml") + lm = lm.getroot() + except: + lm = ElementTree.Element("manifest") + + for localpath in lm.findall("project"): + if localpath.get("name") == projectname: + return 1 + + return None + +def add_to_manifest(repositories, fallback_branch = None): + try: + lm = ElementTree.parse(".repo/local_manifests/slim_manifest.xml") + lm = lm.getroot() + except: + lm = ElementTree.Element("manifest") + + for repository in repositories: + repo_name = repository['repository'] + repo_target = repository['target_path'] + if exists_in_tree(lm, repo_name): + print 'SlimRoms/%s already exists' % (repo_name) + continue + + print 'Adding dependency: SlimRoms/%s -> %s' % (repo_name, repo_target) + project = ElementTree.Element("project", attrib = { "path": repo_target, + "remote": "github", "name": "SlimRoms/%s" % repo_name }) + + if 'branch' in repository: + project.set('revision',repository['branch']) + elif fallback_branch: + print "Using fallback branch %s for %s" % (fallback_branch, repo_name) + project.set('revision', fallback_branch) + else: + print "Using default branch for %s" % repo_name + lm.append(project) - - raw_xml = ElementTree.tostring(lm) - raw_xml = '\n' + raw_xml - - f = open('.repo/slim_manifest.xml', 'w') - f.write(raw_xml) - f.close() - - print "Syncing repository to retrieve project." 
- os.system('repo sync %s' % repo_path) - print "Done!" - sys.exit() - -print "Repository for %s not found in the SlimRoms Github repository list. If this is in error, you may need to manually add it to your slim_manifest.xml." % device + + indent(lm, 0) + raw_xml = ElementTree.tostring(lm) + raw_xml = '\n' + raw_xml + + f = open('.repo/local_manifests/slim_manifest.xml', 'w') + f.write(raw_xml) + f.close() + +def fetch_dependencies(repo_path, fallback_branch = None): + print 'Looking for dependencies' + dependencies_path = repo_path + '/slim.dependencies' + syncable_repos = [] + + if os.path.exists(dependencies_path): + dependencies_file = open(dependencies_path, 'r') + dependencies = json.loads(dependencies_file.read()) + fetch_list = [] + + for dependency in dependencies: + if not is_in_manifest("SlimRoms/%s" % dependency['repository']): + fetch_list.append(dependency) + syncable_repos.append(dependency['target_path']) + + dependencies_file.close() + + if len(fetch_list) > 0: + print 'Adding dependencies to manifest' + add_to_manifest(fetch_list, fallback_branch) + else: + print 'Dependencies file not found, bailing out.' + + if len(syncable_repos) > 0: + print 'Syncing dependencies' + os.system('repo sync %s' % ' '.join(syncable_repos)) + + for deprepo in syncable_repos: + fetch_dependencies(deprepo) + +def has_branch(branches, revision): + return revision in [branch['name'] for branch in branches] + +if depsonly: + repo_path = get_from_manifest(device) + if repo_path: + fetch_dependencies(repo_path) + else: + print "Trying dependencies-only mode on a non-existing device tree?" 
+ + sys.exit() + +else: + for repository in repositories: + repo_name = repository['name'] + if repo_name.startswith("android_device_") and repo_name.endswith("_" + device): + print "Found repository: %s" % repository['name'] + + manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "") + + default_revision = get_default_revision() + print "Default revision: %s" % default_revision + print "Checking branch info" + githubreq = urllib2.Request(repository['branches_url'].replace('{/branch}', '')) + add_auth(githubreq) + result = json.loads(urllib2.urlopen(githubreq).read()) + + ## Try tags, too, since that's what releases use + if not has_branch(result, default_revision): + githubreq = urllib2.Request(repository['tags_url'].replace('{/tag}', '')) + add_auth(githubreq) + result.extend (json.loads(urllib2.urlopen(githubreq).read())) + + repo_path = "device/%s/%s" % (manufacturer, device) + adding = {'repository':repo_name,'target_path':repo_path} + + fallback_branch = None + if not has_branch(result, default_revision): + if os.getenv('ROOMSERVICE_BRANCHES'): + fallbacks = filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' ')) + for fallback in fallbacks: + if has_branch(result, fallback): + print "Using fallback branch: %s" % fallback + fallback_branch = fallback + break + + if not fallback_branch: + print "Default revision %s not found in %s. Bailing." % (default_revision, repo_name) + print "Branches found:" + for branch in [branch['name'] for branch in result]: + print branch + print "Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches." + sys.exit() + + add_to_manifest([adding], fallback_branch) + + print "Syncing repository to retrieve project." + os.system('repo sync %s' % repo_path) + print "Repository synced!" + + fetch_dependencies(repo_path, fallback_branch) + print "Done" + sys.exit() + +print "Repository for %s not found in the SlimRoms Github repository list. 
If this is in error, you may need to manually add it to your local_manifests/slim_manifest.xml." % device + From 404f4282a13ad8f31a9885ab659d7bfc1d3653d8 Mon Sep 17 00:00:00 2001 From: Pawit Pornkitprasan Date: Sat, 28 Jan 2012 09:05:16 +0700 Subject: [PATCH 008/309] Disable dataroaming by default The property is removed so that the property in vendor/cm can take effect Change-Id: Iefadc1a6e71cbf92ecb7363c953f4b5a82064446 --- target/product/full_base_telephony.mk | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/target/product/full_base_telephony.mk b/target/product/full_base_telephony.mk index 9a2c63a8d..4aa441536 100644 --- a/target/product/full_base_telephony.mk +++ b/target/product/full_base_telephony.mk @@ -20,8 +20,7 @@ # entirely appropriate to inherit from for on-device configurations. PRODUCT_PROPERTY_OVERRIDES := \ - keyguard.no_require_sim=true \ - ro.com.android.dataroaming=true + keyguard.no_require_sim=true PRODUCT_COPY_FILES := \ device/generic/goldfish/data/etc/apns-conf.xml:system/etc/apns-conf.xml \ From 58ea8b693925d42ed1d301b2a05df45713f1040c Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Fri, 19 Aug 2011 20:37:12 +0100 Subject: [PATCH 009/309] eat: Automated install of fresh builds Needs: 1 - a build 2 - a connected device Change-Id: I24820b984e79430b7af7ccedc171fc69269c0a32 envsetup: eat safely Change-Id: I2d8b52206213d8f4d8dcce3518cadb2be59e78d8 eat more safely, if there is something to eat Check for zip file presence before trying to eat Change-Id: I7913be619d189bbd723263edad878d6d05b8a5a7 Fixed eat not waiting for device if adb server was not started $(adb get-state) returned unexpected starting server message causing the if statement to fail. Calling "adb start-server" beforehand remedies that. 
Patch Set 2: $(adb get-state) returns 'unknown' if Clockwork recovery is running on the device look for /sbin/recovery and 'pass' the state test also adb wait-for-device will result in endless loop, replace with until loop using with get-state OR 'recovery' tests Patch Set 3: fix the initial adb state test, use AND instead of OR Patch Set 4: Rebased and prevent "device not found" message from spamming the screen by redirecting error output to null Change-Id: I2d41b8853567cde80bf7fc08b5e4f0ad5ba1fdf5 Signed-off-by: Firerat Updated for CM9 variables eat: Adapt to new file naming scheme Change-Id: I101e333918bc215bebcc594c97bdd68307c90aaa eat: Adapt to new storage paths Change-Id: Ic060304275c2d19858636c84740b3f60dfdc6193 envsetup: Make eat restart adbd as root before pushing the recovery command Without adbd running as root we can't write to /cache/recovery and eat fails with a permission denied. Change-Id: I015b50f29c001605cdc1a0088f68296b62809b56 Eat: Option to specify directory of zip in recovery mode It pushes to /mnt/sdcard0, which may be /emmc/ in recovery. Now the user can specify another path other than /sdcard/ where the zip is Change-Id: Ie39615132ae39d9f6b304a3049b2395b2a2af182 fix "eat" Assume user 0 is the active one when flashing... Change-Id: Ibdc28ef49f8023e786ee82c9e42c221409dfd6bb eat: Use /cache if possible * Check the size of /cache and use it if possible. Change-Id: I72fecf3f8d2311c04e63fe81a7ca9a8a6a40419d --- envsetup.sh | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index ee9f5d732..35ea33a57 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -696,6 +696,58 @@ function tapas() printconfig } +function eat() +{ + if [ "$OUT" ] ; then + MODVERSION=`sed -n -e'/ro\.cm\.version/s/.*=//p' $OUT/system/build.prop` + ZIPFILE=cm-$MODVERSION.zip + ZIPPATH=$OUT/$ZIPFILE + if [ ! 
-f $ZIPPATH ] ; then + echo "Nothing to eat" + return 1 + fi + adb start-server # Prevent unexpected starting server message from adb get-state in the next line + if [ $(adb get-state) != device -a $(adb shell busybox test -e /sbin/recovery 2> /dev/null; echo $?) != 0 ] ; then + echo "No device is online. Waiting for one..." + echo "Please connect USB and/or enable USB debugging" + until [ $(adb get-state) = device -o $(adb shell busybox test -e /sbin/recovery 2> /dev/null; echo $?) = 0 ];do + sleep 1 + done + echo "Device Found.." + fi + # if adbd isn't root we can't write to /cache/recovery/ + adb root + sleep 1 + adb wait-for-device + SZ=`stat -c %s $ZIPPATH` + CACHESIZE=`adb shell busybox df -PB1 /cache | grep /cache | tr -s ' ' | cut -d ' ' -f 4` + if [ $CACHESIZE -gt $SZ ]; + then + PUSHDIR=/cache/ + DIR=cache + else + PUSHDIR=/storage/sdcard0/ + # Optional path for sdcard0 in recovery + [ -z "$1" ] && DIR=sdcard/0 || DIR=$1 + fi + echo "Pushing $ZIPFILE to $PUSHDIR" + if adb push $ZIPPATH $PUSHDIR ; then + cat << EOF > /tmp/command +--update_package=/$DIR/$ZIPFILE +EOF + if adb push /tmp/command /cache/recovery/ ; then + echo "Rebooting into recovery for installation" + adb reboot recovery + fi + rm /tmp/command + fi + else + echo "Nothing to eat" + return 1 + fi + return $? +} + function gettop { local TOPFILE=build/core/envsetup.mk From da145b68e9e00c6f36c7c4a74cda445472640c53 Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Sat, 25 Feb 2012 14:24:20 -0800 Subject: [PATCH 010/309] Note that update-api is a lie. 
Change-Id: Idc2d92630e94ccbd60b5d5447c3762db8e5096f8 --- core/apicheck_msg_current.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/apicheck_msg_current.txt b/core/apicheck_msg_current.txt index 440e7f886..2e065aeff 100644 --- a/core/apicheck_msg_current.txt +++ b/core/apicheck_msg_current.txt @@ -9,6 +9,10 @@ To make these errors go away, you have two choices: 2) You can update current.txt by executing the following command: make update-api + ^^^^^^^^^^^^^^^^^^ + NO. NO. STOP BEING LAZY. SERIOUSLY. + DO NOT DO THIS in CM. THIS IS A LIE. IT WILL BREAK THINGS. + To submit the revised current.txt to the main Android repository, you will need approval. ****************************** From ccb6e91b34dcd18a7f88385a7020bfcf093dd22e Mon Sep 17 00:00:00 2001 From: Warren Togami Date: Sat, 3 Mar 2012 23:10:09 -1000 Subject: [PATCH 011/309] Modular backuptool.sh. Executes backup and restore methods defined in arbitrary /system/addon.d/*.sh scripts. * Copy backuptool.functions alongside backuptool.sh. * Delete both from /system/bin as they are not useful there. 
Patch Series ============ http://review.cyanogenmod.com/#change,13265 CyanogenMod/android_build * edify generator http://review.cyanogenmod.com/#change,13266 CyanogenMod/android_system_core * permissions on /system/addon.d http://review.cyanogenmod.com/#change,13267 CyanogenMod/android_vendor_cm * 50-cm.sh reference backup script * modular backuptool.sh * support backuptool.functions used by /system/addon.d/*.sh scripts Change-Id: I26b4907d28f49c69627699d2accd2f0fa2d1b112 --- tools/releasetools/edify_generator.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 1d0700c09..cdf282674 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -135,8 +135,13 @@ def AssertSomeBootloader(self, *bootloaders): def RunBackup(self, command): self.script.append('package_extract_file("system/bin/backuptool.sh", "/tmp/backuptool.sh");') + self.script.append('package_extract_file("system/bin/backuptool.functions", "/tmp/backuptool.functions");') self.script.append('set_perm(0, 0, 0777, "/tmp/backuptool.sh");') + self.script.append('set_perm(0, 0, 0644, "/tmp/backuptool.functions");') self.script.append(('run_program("/tmp/backuptool.sh", "%s");' % command)) + if command == "restore": + self.script.append('delete("/system/bin/backuptool.sh");') + self.script.append('delete("/system/bin/backuptool.functions");') def ShowProgress(self, frac, dur): """Update the progress bar, advancing it over 'frac' over the next From e9b526c48897cd973f77d76ffdd318dd6370c249 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sun, 20 May 2012 22:10:30 -0700 Subject: [PATCH 012/309] build: Stop installing the sample APN list * Sample APN list is always being installed, we don't want this in CM. 
Change-Id: I74b97f3af545c4a7568d3001e1435c63cfbc7de8 --- target/product/full_base_telephony.mk | 1 - 1 file changed, 1 deletion(-) diff --git a/target/product/full_base_telephony.mk b/target/product/full_base_telephony.mk index 4aa441536..4d6fce7a6 100644 --- a/target/product/full_base_telephony.mk +++ b/target/product/full_base_telephony.mk @@ -23,7 +23,6 @@ PRODUCT_PROPERTY_OVERRIDES := \ keyguard.no_require_sim=true PRODUCT_COPY_FILES := \ - device/generic/goldfish/data/etc/apns-conf.xml:system/etc/apns-conf.xml \ frameworks/native/data/etc/handheld_core_hardware.xml:system/etc/permissions/handheld_core_hardware.xml $(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_base.mk) From 87821e23b689145e308ebd1ddc76972c0e2e4ba6 Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Sun, 20 Nov 2011 21:14:29 -0800 Subject: [PATCH 013/309] Add recoveryzip target. Import recovery building/porting tools. echo args Change-Id: Ib346a27d94ccab3e992fe3bc3be9e0010e7b89a0 mkvendor.sh: fixes for Android 4.2. 
Change-Id: Id7ff9ef03700d595734811ccf868371ffb024384 mkvendor: fix echoed build commands cm.mk.template uses PRODUCT_NAME := cm___DEVICE__ Change-Id: I063fc4b93d90ebb034637bf3e92d83c77aa439ca --- core/Makefile | 1 - tools/device/AndroidBoard.mk.template | 8 ++ tools/device/AndroidProducts.mk.template | 2 + tools/device/BoardConfig.mk.template | 25 +++++ tools/device/device.mk.template | 24 +++++ tools/device/makerecoveries.sh | 69 ++++++++++++++ tools/device/mkrecoveryzip.sh | 97 +++++++++++++++++++ tools/device/mkvendor.sh | 115 +++++++++++++++++++++++ tools/device/recovery.fstab.template | 10 ++ tools/device/system.prop.template | 3 + 10 files changed, 353 insertions(+), 1 deletion(-) create mode 100644 tools/device/AndroidBoard.mk.template create mode 100644 tools/device/AndroidProducts.mk.template create mode 100644 tools/device/BoardConfig.mk.template create mode 100644 tools/device/device.mk.template create mode 100644 tools/device/makerecoveries.sh create mode 100755 tools/device/mkrecoveryzip.sh create mode 100755 tools/device/mkvendor.sh create mode 100644 tools/device/recovery.fstab.template create mode 100644 tools/device/system.prop.template diff --git a/core/Makefile b/core/Makefile index 77d0e0ca7..94c124947 100644 --- a/core/Makefile +++ b/core/Makefile @@ -46,7 +46,6 @@ unique_product_copy_files_destinations := $(foreach cf,$(unique_product_copy_files_pairs), \ $(eval _src := $(call word-colon,1,$(cf))) \ $(eval _dest := $(call word-colon,2,$(cf))) \ - $(call check-product-copy-files,$(cf)) \ $(if $(filter $(unique_product_copy_files_destinations),$(_dest)), \ $(info PRODUCT_COPY_FILES $(cf) ignored.), \ $(eval _fulldest := $(call append-path,$(PRODUCT_OUT),$(_dest))) \ diff --git a/tools/device/AndroidBoard.mk.template b/tools/device/AndroidBoard.mk.template new file mode 100644 index 000000000..55a36d523 --- /dev/null +++ b/tools/device/AndroidBoard.mk.template @@ -0,0 +1,8 @@ +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +ALL_PREBUILT 
+= $(INSTALLED_KERNEL_TARGET) + +# include the non-open-source counterpart to this file +-include vendor/__MANUFACTURER__/__DEVICE__/AndroidBoardVendor.mk diff --git a/tools/device/AndroidProducts.mk.template b/tools/device/AndroidProducts.mk.template new file mode 100644 index 000000000..f31c5bf79 --- /dev/null +++ b/tools/device/AndroidProducts.mk.template @@ -0,0 +1,2 @@ +PRODUCT_MAKEFILES := \ + $(LOCAL_DIR)/device___DEVICE__.mk diff --git a/tools/device/BoardConfig.mk.template b/tools/device/BoardConfig.mk.template new file mode 100644 index 000000000..66538e35a --- /dev/null +++ b/tools/device/BoardConfig.mk.template @@ -0,0 +1,25 @@ +USE_CAMERA_STUB := true + +# inherit from the proprietary version +-include vendor/__MANUFACTURER__/__DEVICE__/BoardConfigVendor.mk + +TARGET_ARCH := arm +TARGET_NO_BOOTLOADER := true +TARGET_BOARD_PLATFORM := unknown +TARGET_CPU_ABI := armeabi +TARGET_BOOTLOADER_BOARD_NAME := __DEVICE__ + +BOARD_KERNEL_CMDLINE := __CMDLINE__ +BOARD_KERNEL_BASE := 0x__BASE__ +BOARD_KERNEL_PAGESIZE := __PAGE_SIZE__ + +# fix this up by examining /proc/mtd on a running device +BOARD_BOOTIMAGE_PARTITION_SIZE := 0x00380000 +BOARD_RECOVERYIMAGE_PARTITION_SIZE := 0x00480000 +BOARD_SYSTEMIMAGE_PARTITION_SIZE := 0x08c60000 +BOARD_USERDATAIMAGE_PARTITION_SIZE := 0x105c0000 +BOARD_FLASH_BLOCK_SIZE := 131072 + +TARGET_PREBUILT_KERNEL := device/__MANUFACTURER__/__DEVICE__/kernel + +BOARD_HAS_NO_SELECT_BUTTON := true diff --git a/tools/device/device.mk.template b/tools/device/device.mk.template new file mode 100644 index 000000000..91ffdc951 --- /dev/null +++ b/tools/device/device.mk.template @@ -0,0 +1,24 @@ +$(call inherit-product, $(SRC_TARGET_DIR)/product/languages_full.mk) + +# The gps config appropriate for this device +$(call inherit-product, device/common/gps/gps_us_supl.mk) + +$(call inherit-product-if-exists, vendor/__MANUFACTURER__/__DEVICE__/__DEVICE__-vendor.mk) + +DEVICE_PACKAGE_OVERLAYS += device/__MANUFACTURER__/__DEVICE__/overlay + + +ifeq 
($(TARGET_PREBUILT_KERNEL),) + LOCAL_KERNEL := device/__MANUFACTURER__/__DEVICE__/kernel +else + LOCAL_KERNEL := $(TARGET_PREBUILT_KERNEL) +endif + +PRODUCT_COPY_FILES += \ + $(LOCAL_KERNEL):kernel + +$(call inherit-product, build/target/product/full.mk) + +PRODUCT_BUILD_PROP_OVERRIDES += BUILD_UTC_DATE=0 +PRODUCT_NAME := full___DEVICE__ +PRODUCT_DEVICE := __DEVICE__ diff --git a/tools/device/makerecoveries.sh b/tools/device/makerecoveries.sh new file mode 100644 index 000000000..f561c81f6 --- /dev/null +++ b/tools/device/makerecoveries.sh @@ -0,0 +1,69 @@ +if [ -z "$1" ] +then + echo "Please provide a lunch option." + return +fi + +PRODUCTS=$1 + +for product in $PRODUCTS +do + echo $product +done + +echo $(echo $PRODUCTS | wc -w) Products + +unset PUBLISHED_RECOVERIES + +MCP=$(which mcp) +if [ -z "$MCP" ] +then + NO_UPLOAD=true +fi + +function mcpguard () { + if [ -z $NO_UPLOAD ] + then + mcp $1 $2 + md5sum $1 > $1.md5sum.txt + mcp $1.md5sum.txt $2.md5sum.txt + fi +} + +VERSION=$(cat bootable/recovery/Android.mk | grep RECOVERY_VERSION | grep RECOVERY_NAME | awk '{ print $4 }' | sed s/v//g) +echo Recovery Version: $VERSION + +for lunchoption in $PRODUCTS +do + lunch $lunchoption + if [ -z $NO_CLEAN ] + then + rm -rf $OUT/obj/EXECUTABLES/recovery_intermediates + rm -rf $OUT/recovery* + rm -rf $OUT/root* + fi + DEVICE_NAME=$(echo $TARGET_PRODUCT | sed s/koush_// | sed s/aosp_// | sed s/motorola// | sed s/huawei_// | sed s/htc_// | sed s/_us// | sed s/cyanogen_// | sed s/generic_// | sed s/full_//) + PRODUCT_NAME=$(basename $OUT) + make -j16 recoveryzip + RESULT=$? + if [ $RESULT != "0" ] + then + echo build error! 
+ break + fi + mcpguard $OUT/recovery.img recoveries/recovery-clockwork-$VERSION-$DEVICE_NAME.img + mcpguard $OUT/utilities/update.zip recoveries/recovery-clockwork-$VERSION-$DEVICE_NAME.zip + + if [ -f "ROMManagerManifest/devices.rb" ] + then + pushd ROMManagerManifest + ruby devices.rb $DEVICE_NAME $VERSION $lunchoption + popd + fi +done + +for published_recovery in $PUBLISHED_RECOVERIES +do + echo $published_recovery +done + diff --git a/tools/device/mkrecoveryzip.sh b/tools/device/mkrecoveryzip.sh new file mode 100755 index 000000000..e6fae3708 --- /dev/null +++ b/tools/device/mkrecoveryzip.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +OUT=$1 +SIGNAPK=$2 + +if [ -z "$OUT" -o -z "$SIGNAPK" ] +then + echo "Android build environment not detected." + exit 1 +fi + +ANDROID_ROOT=$(pwd) +OUT=$ANDROID_ROOT/$OUT +SIGNAPK=$ANDROID_ROOT/$SIGNAPK + +pushd . > /dev/null 2> /dev/null + +UTILITIES_DIR=$OUT/utilities +mkdir -p $UTILITIES_DIR +RECOVERY_DIR=$UTILITIES_DIR/recovery +rm -rf $RECOVERY_DIR +mkdir -p $RECOVERY_DIR +cd $RECOVERY_DIR +cp -R $OUT/recovery/root/etc etc +cp -R $OUT/recovery/root/sbin sbin +cp -R $OUT/recovery/root/res res +SCRIPT_DIR=META-INF/com/google/android +mkdir -p $SCRIPT_DIR +cp $OUT/system/bin/updater $SCRIPT_DIR/update-binary + + +UPDATER_SCRIPT=$SCRIPT_DIR/updater-script +rm -f $UPDATER_SCRIPT +touch $UPDATER_SCRIPT +mkdir -p $(dirname $UPDATER_SCRIPT) + +FILES= +SYMLINKS= + +for file in $(find .) +do + +if [ -d $file ] +then + continue +fi + +META_INF=$(echo $file | grep META-INF) +if [ ! 
-z $META_INF ] +then + continue; +fi + +if [ -h $file ] +then + SYMLINKS=$SYMLINKS' '$file +elif [ -f $file ] +then + FILES=$FILES' '$file +fi +done + + +echo 'ui_print("Replacing stock recovery with ClockworkMod recovery...");' >> $UPDATER_SCRIPT + +echo 'delete("sbin/recovery");' >> $UPDATER_SCRIPT +echo 'package_extract_file("sbin/recovery", "/sbin/recovery");' >> $UPDATER_SCRIPT +echo 'set_perm(0, 0, 0755, "/sbin/recovery");' >> $UPDATER_SCRIPT +echo 'symlink("recovery", "/sbin/busybox");' >> $UPDATER_SCRIPT + +echo 'run_program("/sbin/busybox", "sh", "-c", "busybox rm -f /etc ; busybox mkdir -p /etc;");' >> $UPDATER_SCRIPT + +for file in $FILES +do + echo 'delete("'$(echo $file | sed s!\\./!!g)'");' >> $UPDATER_SCRIPT + echo 'package_extract_file("'$(echo $file | sed s!\\./!!g)'", "'$(echo $file | sed s!\\./!/!g)'");' >> $UPDATER_SCRIPT + if [ -x $file ] + then + echo 'set_perm(0, 0, 0755, "'$(echo $file | sed s!\\./!/!g)'");' >> $UPDATER_SCRIPT + fi +done + +for file in $SYMLINKS +do + echo 'symlink("'$(readlink $file)'", "'$(echo $file | sed s!\\./!/!g)'");' >> $UPDATER_SCRIPT +done + +echo 'set_perm_recursive(0, 2000, 0755, 0755, "/sbin");' >> $UPDATER_SCRIPT +echo 'run_program("/sbin/busybox", "sh", "-c", "/sbin/killrecovery.sh");' >> $UPDATER_SCRIPT +rm -f $UTILITIES_DIR/unsigned.zip +rm -f $UTILITIES_DIR/update.zip +echo zip -ry $UTILITIES_DIR/unsigned.zip . -x $SYMLINKS '*\[*' '*\[\[*' +zip -ry $UTILITIES_DIR/unsigned.zip . 
-x $SYMLINKS '*\[*' '*\[\[*' +java -jar $SIGNAPK -w $ANDROID_ROOT/build/target/product/security/testkey.x509.pem $ANDROID_ROOT/build/target/product/security/testkey.pk8 $UTILITIES_DIR/unsigned.zip $UTILITIES_DIR/update.zip + +echo Recovery FakeFlash is now available at $OUT/utilities/update.zip +popd > /dev/null 2> /dev/null diff --git a/tools/device/mkvendor.sh b/tools/device/mkvendor.sh new file mode 100755 index 000000000..2742a8b79 --- /dev/null +++ b/tools/device/mkvendor.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +function usage +{ + echo Usage: + echo " $(basename $0) manufacturer device [boot.img]" + echo " The boot.img argument is the extracted recovery or boot image." + echo " The boot.img argument should not be provided for devices" + echo " that have non standard boot images (ie, Samsung)." + echo + echo Example: + echo " $(basename $0) motorola sholes ~/Downloads/recovery-sholes.img" + exit 0 +} + +MANUFACTURER=$1 +DEVICE=$2 +BOOTIMAGE=$3 + +UNPACKBOOTIMG=$(which unpackbootimg) + +echo Arguments: $@ + +if [ -z "$MANUFACTURER" ] +then + usage +fi + +if [ -z "$DEVICE" ] +then + usage +fi + +ANDROID_TOP=$(dirname $0)/../../../ +pushd $ANDROID_TOP > /dev/null +ANDROID_TOP=$(pwd) +popd > /dev/null + +TEMPLATE_DIR=$(dirname $0) +pushd $TEMPLATE_DIR > /dev/null +TEMPLATE_DIR=$(pwd) +popd > /dev/null + +DEVICE_DIR=$ANDROID_TOP/device/$MANUFACTURER/$DEVICE + +if [ ! -z "$BOOTIMAGE" ] +then + if [ -z "$UNPACKBOOTIMG" ] + then + echo unpackbootimg not found. Is your android build environment set up and have the host tools been built? 
+ exit 0 + fi + + BOOTIMAGEFILE=$(basename $BOOTIMAGE) + + echo Output will be in $DEVICE_DIR + mkdir -p $DEVICE_DIR + + TMPDIR=/tmp/$(whoami)/bootimg + rm -rf $TMPDIR + mkdir -p $TMPDIR + cp $BOOTIMAGE $TMPDIR + pushd $TMPDIR > /dev/null + unpackbootimg -i $BOOTIMAGEFILE > /dev/null + mkdir ramdisk + pushd ramdisk > /dev/null + gunzip -c ../$BOOTIMAGEFILE-ramdisk.gz | cpio -i + popd > /dev/null + BASE=$(cat $TMPDIR/$BOOTIMAGEFILE-base) + CMDLINE=$(cat $TMPDIR/$BOOTIMAGEFILE-cmdline) + PAGESIZE=$(cat $TMPDIR/$BOOTIMAGEFILE-pagesize) + export SEDCMD="s#__CMDLINE__#$CMDLINE#g" + echo $SEDCMD > $TMPDIR/sedcommand + cp $TMPDIR/$BOOTIMAGEFILE-zImage $DEVICE_DIR/kernel + popd > /dev/null +else + mkdir -p $DEVICE_DIR + touch $DEVICE_DIR/kernel + BASE=10000000 + CMDLINE=no_console_suspend + PAGESIZE=00000800 + export SEDCMD="s#__CMDLINE__#$CMDLINE#g" + echo $SEDCMD > $TMPDIR/sedcommand +fi + +for file in $(find $TEMPLATE_DIR -name '*.template') +do + OUTPUT_FILE=$DEVICE_DIR/$(basename $(echo $file | sed s/\\.template//g)) + cat $file | sed s/__DEVICE__/$DEVICE/g | sed s/__MANUFACTURER__/$MANUFACTURER/g | sed -f $TMPDIR/sedcommand | sed s/__BASE__/$BASE/g | sed s/__PAGE_SIZE__/$PAGESIZE/g > $OUTPUT_FILE +done + +if [ ! -z "$TMPDIR" ] +then + RECOVERY_FSTAB=$TMPDIR/ramdisk/etc/recovery.fstab + if [ -f "$RECOVERY_FSTAB" ] + then + cp $RECOVERY_FSTAB $DEVICE_DIR/recovery.fstab + fi +fi + + +mv $DEVICE_DIR/device.mk $DEVICE_DIR/device_$DEVICE.mk + +echo Creating initial git repository. +pushd $DEVICE_DIR +git init +git add . +git commit -a -m "mkvendor.sh: Initial commit of $DEVICE" +popd + +echo Done! +echo Use the following command to set up your build environment: +echo ' 'lunch slim_$DEVICE-eng +echo And use the follwowing command to build a recovery: +echo ' '. 
build/tools/device/makerecoveries.sh slim_$DEVICE-eng diff --git a/tools/device/recovery.fstab.template b/tools/device/recovery.fstab.template new file mode 100644 index 000000000..41fb92e8b --- /dev/null +++ b/tools/device/recovery.fstab.template @@ -0,0 +1,10 @@ +# mount point fstype device [device2] + +/boot mtd boot +/cache yaffs2 cache +/data yaffs2 userdata +/misc mtd misc +/recovery mtd recovery +/sdcard vfat /dev/block/mmcblk0p1 /dev/block/mmcblk0 +/system yaffs2 system +/sd-ext ext4 /dev/block/mmcblk0p2 diff --git a/tools/device/system.prop.template b/tools/device/system.prop.template new file mode 100644 index 000000000..411392939 --- /dev/null +++ b/tools/device/system.prop.template @@ -0,0 +1,3 @@ +# +# system.prop for __DEVICE__ +# From afeb06c69c629bf468f1070647e85cbcdff163f9 Mon Sep 17 00:00:00 2001 From: Andreas Schneider Date: Fri, 31 Aug 2012 22:45:52 +0200 Subject: [PATCH 014/309] build: Remove wallpapers from full_base. They are already in the slim common config for the full phone. 
Change-Id: I02fc0ea73e92ae8b19931d05e6783e3cfa166af2 --- target/product/full_base.mk | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/target/product/full_base.mk b/target/product/full_base.mk index 65bdf0f1b..7d19685f1 100644 --- a/target/product/full_base.mk +++ b/target/product/full_base.mk @@ -27,16 +27,6 @@ PRODUCT_PACKAGES := \ libwnndict \ WAPPushManager -PRODUCT_PACKAGES += \ - Galaxy4 \ - HoloSpiralWallpaper \ - LiveWallpapers \ - LiveWallpapersPicker \ - MagicSmokeWallpapers \ - NoiseField \ - PhaseBeam \ - PhotoTable - # Additional settings used in all AOSP builds PRODUCT_PROPERTY_OVERRIDES := \ ro.config.ringtone=Ring_Synth_04.ogg \ From 235f7c591a3c55730595e517cad4b9b5076df7f4 Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Wed, 11 Apr 2012 10:11:52 -0700 Subject: [PATCH 015/309] add slim.mk.template file for mkvendor.sh Change-Id: Iba150a5350a7a7dbc9e3444d71d4ff293d51f3d7 fix template for mkvendor.sh Change-Id: I2c78ed013053efa0c399e022502fd9ec4a1de8e6 mkvendor.sh: force armeabi-v7a Change-Id: I9f29eec73a13144b47ff7b1db1d59cb1e5a4ef64 mkvendor.sh: typo Change-Id: I8cc2303fde7c815384e09f65b6d4a1116ac29ee4 mkvendor.sh: more fixes Change-Id: I0c641d42a96e44bb9170475d5de21cb8644e4d3d BoardConfig.mk.template: Add TARGET_CPU_VARIANT Change-Id: I8cd74a794e0d1a6818b5479cf7f5264e87d565f3 BoardConfig.mk.template: Add TARGET_CPU_SMP This one is easy forget if not in the template Change-Id: If65e557a661ecc81c6bfc434525b3c9937ff988e --- tools/device/BoardConfig.mk.template | 8 +++++++- tools/device/slim.mk.template | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 tools/device/slim.mk.template diff --git a/tools/device/BoardConfig.mk.template b/tools/device/BoardConfig.mk.template index 66538e35a..617673f98 100644 --- a/tools/device/BoardConfig.mk.template +++ b/tools/device/BoardConfig.mk.template @@ -6,7 +6,13 @@ USE_CAMERA_STUB := true TARGET_ARCH := arm TARGET_NO_BOOTLOADER := true TARGET_BOARD_PLATFORM 
:= unknown -TARGET_CPU_ABI := armeabi +TARGET_CPU_ABI := armeabi-v7a +TARGET_CPU_ABI2 := armeabi +TARGET_ARCH_VARIANT := armv7-a-neon +TARGET_CPU_VARIANT := cortex-a7 +TARGET_CPU_SMP := true +ARCH_ARM_HAVE_TLS_REGISTER := true + TARGET_BOOTLOADER_BOARD_NAME := __DEVICE__ BOARD_KERNEL_CMDLINE := __CMDLINE__ diff --git a/tools/device/slim.mk.template b/tools/device/slim.mk.template new file mode 100644 index 000000000..6af93d7e9 --- /dev/null +++ b/tools/device/slim.mk.template @@ -0,0 +1,18 @@ +## Specify phone tech before including full_phone +$(call inherit-product, vendor/slim/config/gsm.mk) + +# Release name +PRODUCT_RELEASE_NAME := __DEVICE__ + +# Inherit some common SLIM stuff. +$(call inherit-product, vendor/slim/config/common_full_phone.mk) + +# Inherit device configuration +$(call inherit-product, device/__MANUFACTURER__/__DEVICE__/device___DEVICE__.mk) + +## Device identifier. This must come after all inclusions +PRODUCT_DEVICE := __DEVICE__ +PRODUCT_NAME := slim___DEVICE__ +PRODUCT_BRAND := __MANUFACTURER__ +PRODUCT_MODEL := __DEVICE__ +PRODUCT_MANUFACTURER := __MANUFACTURER__ From ee4e3ca047be21aae6a00d4ef351e2c7fc3f2fe3 Mon Sep 17 00:00:00 2001 From: Sebastian Schmidt Date: Sun, 15 Jul 2012 15:25:49 +0200 Subject: [PATCH 016/309] Use less heap space for dex on 32-bit build hosts Run dex with -JXmx1024M instead of 2048M if running on a 32-bit host. Surprisingly this appears to also work for framework and services; if not, a little fine-tuning will be needed. This does not change anything for non-32-bit hosts. 
Change-Id: I1d87c3e394acc934af6f7770ea54ba265a1df838 --- core/definitions.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/definitions.mk b/core/definitions.mk index 302a2b14b..acf11a808 100644 --- a/core/definitions.mk +++ b/core/definitions.mk @@ -1991,7 +1991,7 @@ define transform-classes.jar-to-dex @mkdir -p $(dir $@) $(hide) rm -f $(dir $@)classes*.dex $(hide) $(DX) \ - $(if $(findstring windows,$(HOST_OS)),,-JXms16M -JXmx2048M) \ + $(if $(findstring windows,$(HOST_OS)),,-JXms16M -JXmx$(if $(call streq,$(HOST_BITS),32),1024,2048)M) \ --dex --output=$(dir $@) \ $(if $(NO_OPTIMIZE_DX), \ --no-optimize) \ From 6a9ae9f84f8d5589d80053be2fa2a6a6fff9afc8 Mon Sep 17 00:00:00 2001 From: Austen Dicken Date: Sun, 15 Jul 2012 16:28:05 -0500 Subject: [PATCH 017/309] update ota_from_target_files to handle mounting/unmounting for backupscript backupscript should not be mounting/unmounting itself as it makes other scripts have unexpected results (such as modelid_cfg, which expects /system to be mounted) instead have the ota script handle the mounting/unmounting Change-Id: I94511f4147c624d975cb3ecbeaa8b0e98f63437c --- tools/releasetools/edify_generator.py | 6 ++++++ tools/releasetools/ota_from_target_files | 2 ++ 2 files changed, 8 insertions(+) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index cdf282674..231200aa1 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -201,6 +201,12 @@ def Mount(self, mount_point, mount_options_by_format=""): p.mount_point, mount_flags)) self.mounts.add(p.mount_point) + def Unmount(self, mount_point): + """Unmount the partition with the given mount_point.""" + if mount_point in self.mounts: + self.mounts.remove(mount_point) + self.script.append('unmount("%s");' % (mount_point,)) + def UnpackPackageDir(self, src, dst): """Unpack a given directory from the OTA package into the given destination directory.""" diff --git 
a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 18283fa01..676369ace 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -593,7 +593,9 @@ else if get_stage("%(bcb_dev)s") == "3/3" then device_specific.FullOTA_InstallBegin() if OPTIONS.backuptool: + script.Mount("/system") script.RunBackup("backup") + script.Unmount("/system") system_progress = 0.75 From 5abb788884385574d83da659eb6812d649c7d7d2 Mon Sep 17 00:00:00 2001 From: Daniel Bateman Date: Sat, 4 Aug 2012 03:48:09 -0500 Subject: [PATCH 018/309] envsetup: Add some more commands * mmp: Like mm but pushes the built files afterward. * mmmp: Like mmm but pushes the built files afterward. These may be useful when doing debugging on an individual library, since it builds the library and pushes the new library to the device. Examples: mmmp -B device/samsung/vibrantmtd/libgps mmmp -B packages/apps/Torch Thanks: tpruvot for revisions Change-Id: Ia6bdd2b63797ea86e164af69496b70ea6d7c5156 --- envsetup.sh | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 35ea33a57..f1a3e70db 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -9,6 +9,8 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - mmm: Builds all of the modules in the supplied directories, but not their dependencies. To limit the modules being built use the syntax: mmm dir/:target1,target2. - mma: Builds all of the modules in the current directory, and their dependencies. +- mmp: Builds all of the modules in the current directory and pushes them to the device. +- mmmp: Builds all of the modules in the supplied directories and pushes them to the device. - mmma: Builds all of the modules in the supplied directories, and their dependencies. - cgrep: Greps on all local C/C++ files. - ggrep: Greps on all local Gradle files. @@ -748,6 +750,59 @@ EOF return $? 
} +# Credit for color strip sed: http://goo.gl/BoIcm +function mmmp() +{ + if [[ $# < 1 || $1 == "--help" || $1 == "-h" ]]; then + echo "mmmp [make arguments] " + return 1 + fi + + # Get product name from cm_ + PRODUCT=`echo $TARGET_PRODUCT | tr "_" "\n" | tail -n 1` + + adb start-server # Prevent unexpected starting server message from adb get-state in the next line + if [ $(adb get-state) != device -a $(adb shell busybox test -e /sbin/recovery 2> /dev/null; echo $?) != 0 ] ; then + echo "No device is online. Waiting for one..." + echo "Please connect USB and/or enable USB debugging" + until [ $(adb get-state) = device -o $(adb shell busybox test -e /sbin/recovery 2> /dev/null; echo $?) = 0 ];do + sleep 1 + done + echo "Device Found.." + fi + + adb root &> /dev/null + sleep 0.3 + adb wait-for-device &> /dev/null + sleep 0.3 + adb remount &> /dev/null + + mmm $* | tee .log + + # Install: + LOC=$(cat .log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep 'Install' | cut -d ':' -f 2) + + # Copy: + LOC=$LOC $(cat .log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep 'Copy' | cut -d ':' -f 2) + + for FILE in $LOC; do + # Get target file name (i.e. system/bin/adb) + TARGET=$(echo $FILE | sed "s/\/$PRODUCT\//\n/" | tail -n 1) + + # Don't send files that are not in /system. + if ! echo $TARGET | egrep '^system\/' > /dev/null ; then + continue + else + echo "Pushing: $TARGET" + adb push $FILE $TARGET + fi + done + rm -f .log + return 0 +} + +alias mmp='mmmp .' + function gettop { local TOPFILE=build/core/envsetup.mk From ae5e749b2a74fc67d2936d49526487604637d4de Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Mon, 20 Aug 2012 11:00:51 -0700 Subject: [PATCH 019/309] build: Add Qualcomm's helper macros * This is used everywhere in Qualcomm's code. There's no reason we need to constantly replace it with uglier stuff in our Makefiles. 
Change-Id: I0183a338470ec96a38f356a47bae65a0d3fb2c95 --- core/main.mk | 3 + core/qcom_utils.mk | 191 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 194 insertions(+) create mode 100755 core/qcom_utils.mk diff --git a/core/main.mk b/core/main.mk index 76fab2a50..f7a9557f7 100644 --- a/core/main.mk +++ b/core/main.mk @@ -244,6 +244,9 @@ endif # Bring in standard build system definitions. include $(BUILD_SYSTEM)/definitions.mk +# Bring in Qualcomm helper macros +include $(BUILD_SYSTEM)/qcom_utils.mk + # Bring in dex_preopt.mk include $(BUILD_SYSTEM)/dex_preopt.mk diff --git a/core/qcom_utils.mk b/core/qcom_utils.mk new file mode 100755 index 000000000..76cff81ad --- /dev/null +++ b/core/qcom_utils.mk @@ -0,0 +1,191 @@ +# vars for use by utils +empty := +space := $(empty) $(empty) +colon := $(empty):$(empty) +underscore := $(empty)_$(empty) + +# $(call match-word,w1,w2) +# checks if w1 == w2 +# How it works +# if (w1-w2 not empty or w2-w1 not empty) then not_match else match +# +# returns true or empty +#$(warning :$(1): :$(2): :$(subst $(1),,$(2)):) \ +#$(warning :$(2): :$(1): :$(subst $(2),,$(1)):) \ +# +define match-word +$(strip \ + $(if $(or $(subst $(1),$(empty),$(2)),$(subst $(2),$(empty),$(1))),,true) \ +) +endef + +# $(call find-word-in-list,w,wlist) +# finds an exact match of word w in word list wlist +# +# How it works +# fill wlist spaces with colon +# wrap w with colon +# search word w in list wl, if found match m, return stripped word w +# +# returns stripped word or empty +define find-word-in-list +$(strip \ + $(eval wl:= $(colon)$(subst $(space),$(colon),$(strip $(2)))$(colon)) \ + $(eval w:= $(colon)$(strip $(1))$(colon)) \ + $(eval m:= $(findstring $(w),$(wl))) \ + $(if $(m),$(1),) \ +) +endef + +# $(call match-word-in-list,w,wlist) +# does an exact match of word w in word list wlist +# How it works +# if the input word is not empty +# return output of an exact match of word w in wordlist wlist +# else +# return empty +# returns true 
or empty +define match-word-in-list +$(strip \ + $(if $(strip $(1)), \ + $(call match-word,$(call find-word-in-list,$(1),$(2)),$(strip $(1))), \ + ) \ +) +endef + +# $(call match-prefix,p,delim,w/wlist) +# matches prefix p in wlist using delimiter delim +# +# How it works +# trim the words in wlist w +# if find-word-in-list returns not empty +# return true +# else +# return empty +# +define match-prefix +$(strip \ + $(eval w := $(strip $(1)$(strip $(2)))) \ + $(eval text := $(patsubst $(w)%,$(1),$(3))) \ + $(if $(call match-word-in-list,$(1),$(text)),true,) \ +) +endef + +# ---- +# The following utilities are meant for board platform specific +# featurisation + +# $(call get-vendor-board-platforms,v) +# returns list of board platforms for vendor v +define get-vendor-board-platforms +$($(1)_BOARD_PLATFORMS) +endef + +# $(call is-board-platform,bp) +# returns true or empty +define is-board-platform +$(call match-word,$(1),$(TARGET_BOARD_PLATFORM)) +endef + +# $(call is-not-board-platform,bp) +# returns true or empty +define is-not-board-platform +$(if $(call match-word,$(1),$(TARGET_BOARD_PLATFORM)),,true) +endef + +# $(call is-board-platform-in-list,bpl) +# returns true or empty +define is-board-platform-in-list +$(call match-word-in-list,$(TARGET_BOARD_PLATFORM),$(1)) +endef + +# $(call is-vendor-board-platform,vendor) +# returns true or empty +define is-vendor-board-platform +$(strip \ + $(call match-word-in-list,$(TARGET_BOARD_PLATFORM),\ + $(call get-vendor-board-platforms,$(1)) \ + ) \ +) +endef + +# $(call is-chipset-in-board-platform,chipset) +# does a prefix match of chipset in TARGET_BOARD_PLATFORM +# uses underscore as a delimiter +# +# returns true or empty +define is-chipset-in-board-platform +$(call match-prefix,$(1),$(underscore),$(TARGET_BOARD_PLATFORM)) +endef + +# $(call is-chipset-prefix-in-board-platform,prefix) +# does a chipset prefix match in TARGET_BOARD_PLATFORM +# assumes '_' and 'a' as the delimiter to the chipset prefix +# +# How it works 
+# if ($(prefix)_ or $(prefix)a match in board platform) +# return true +# else +# return empty +# +define is-chipset-prefix-in-board-platform +$(strip \ + $(eval delim_a := $(empty)a$(empty)) \ + $(if \ + $(or \ + $(call match-prefix,$(1),$(delim_a),$(TARGET_BOARD_PLATFORM)), \ + $(call match-prefix,$(1),$(underscore),$(TARGET_BOARD_PLATFORM)), \ + ), \ + true, \ + ) \ +) +endef + +#---- +# The following utilities are meant for Android Code Name +# specific featurisation +# +# refer http://source.android.com/source/build-numbers.html +# for code names and associated sdk versions +CUPCAKE_SDK_VERSIONS := 3 +DONUT_SDK_VERSIONS := 4 +ECLAIR_SDK_VERSIONS := 5 6 7 +FROYO_SDK_VERSIONS := 8 +GINGERBREAD_SDK_VERSIONS := 9 10 +HONEYCOMB_SDK_VERSIONS := 11 12 13 +ICECREAM_SANDWICH_SDK_VERSIONS := 14 15 +JELLY_BEAN_SDK_VERSIONS := 16 + +# $(call is-android-codename,codename) +# codename is one of cupcake,donut,eclair,froyo,gingerbread,icecream +# please refer the $(codename)_SDK_VERSIONS declared above +define is-android-codename +$(strip \ + $(if \ + $(call match-word-in-list,$(PLATFORM_SDK_VERSION),$($(1)_SDK_VERSIONS)), \ + true, \ + ) \ +) +endef + +# $(call is-android-codename-in-list,cnlist) +# cnlist is combination/list of android codenames +define is-android-codename-in-list +$(strip \ + $(eval acn := $(empty)) \ + $(foreach \ + i,$(1),\ + $(eval acn += \ + $(if \ + $(call \ + match-word-in-list,\ + $(PLATFORM_SDK_VERSION),\ + $($(i)_SDK_VERSIONS)\ + ),\ + true,\ + )\ + )\ + ) \ + $(if $(strip $(acn)),true,) \ +) +endef From 6759c9400ba5c3c1c3430e6795efbaf0544b593f Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Thu, 20 Sep 2012 10:48:13 -0600 Subject: [PATCH 020/309] build: Pass OTAPACKAGE location to squisher Pass the OTAPACKAGE location so we don't have to guess what the package name will be. This fixes an issue when BUILD_NUMBER is defined in an environment with automated builds. 
Change-Id: Iab18b541de37c99d237aabc3cb7b2e9ab076617b --- core/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/Makefile b/core/Makefile index 94c124947..5075d7129 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1681,10 +1681,10 @@ otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) bacon: otapackage ifneq ($(TARGET_CUSTOM_RELEASETOOL),) @echo -e ${CL_YLW}"Running custom releasetool..."${CL_RST} - $(hide) $(TARGET_CUSTOM_RELEASETOOL) + $(hide) OTAPACKAGE=$(PWD)/$(INTERNAL_OTA_PACKAGE_TARGET) $(TARGET_CUSTOM_RELEASETOOL) else @echo -e ${CL_YLW}"Running releasetool..."${CL_RST} - $(hide) ./vendor/cm/tools/squisher + $(hide) OTAPACKAGE=$(PWD)/$(INTERNAL_OTA_PACKAGE_TARGET) ./vendor/cm/tools/squisher endif endif # recovery_fstab is defined From 9bc6f5792b8943487c3cb8906d298ad5c64807c3 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Thu, 27 Sep 2012 18:11:25 +0530 Subject: [PATCH 021/309] envsetup: set OUT_DIR to an absolute path always OUT_DIR was set to $(TOPDIR)out previously, but $(TOPDIR) was null, so it was a relative path. This broke releasetools, inline kernel building, etc since they require absolute paths. Fix it so that it is set to $(shell readlink -f .)/out if $(TOPDIR) is null. Also remove hacks which checked if (OUT_DIR) was out and changed it to $(ANDROID_BUILD_TOP)/out to workaround the aforementioned problem. 
Change-Id: I459a3b1325a1bbea0565cd73f6acf160d4ed9b39 --- core/envsetup.mk | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/envsetup.mk b/core/envsetup.mk index bf044553b..5f43cf9d7 100644 --- a/core/envsetup.mk +++ b/core/envsetup.mk @@ -207,8 +207,12 @@ endif ifeq (,$(strip $(OUT_DIR))) ifeq (,$(strip $(OUT_DIR_COMMON_BASE))) +ifneq ($(TOPDIR),) OUT_DIR := $(TOPDIR)out else +OUT_DIR := $(shell readlink -f .)/out +endif +else OUT_DIR := $(OUT_DIR_COMMON_BASE)/$(notdir $(PWD)) endif endif From 22a0106563ce51216e0c008ffa180af8ff008ebd Mon Sep 17 00:00:00 2001 From: David Ferguson Date: Thu, 4 Oct 2012 10:15:58 -0400 Subject: [PATCH 022/309] build: work around missing readlink -f on Mac Change-Id: I5d56366cf33a2b02f1886c87815d00cff279779d --- core/envsetup.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/envsetup.mk b/core/envsetup.mk index 5f43cf9d7..eb19f4fba 100644 --- a/core/envsetup.mk +++ b/core/envsetup.mk @@ -210,7 +210,7 @@ ifeq (,$(strip $(OUT_DIR_COMMON_BASE))) ifneq ($(TOPDIR),) OUT_DIR := $(TOPDIR)out else -OUT_DIR := $(shell readlink -f .)/out +OUT_DIR := $(shell python -c 'import os,sys; print os.path.realpath(sys.argv[1])' .)/out endif else OUT_DIR := $(OUT_DIR_COMMON_BASE)/$(notdir $(PWD)) From afaadf0c6a801f7a0e5376a1a77534e1a114eb3e Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Thu, 1 Nov 2012 12:58:45 +0530 Subject: [PATCH 023/309] cleanbuild: add *.zip.md5sum to installclean_files Change-Id: I4b68bb9a8d421a54b008c5318fa82ed230bd6441 --- core/cleanbuild.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/core/cleanbuild.mk b/core/cleanbuild.mk index 4316ad6ba..31f65487a 100644 --- a/core/cleanbuild.mk +++ b/core/cleanbuild.mk @@ -194,6 +194,7 @@ installclean_files := \ $(PRODUCT_OUT)/*.xlb \ $(PRODUCT_OUT)/*.zip \ $(PRODUCT_OUT)/kernel \ + $(PRODUCT_OUT)/*.zip.md5sum \ $(PRODUCT_OUT)/data \ $(PRODUCT_OUT)/skin \ $(PRODUCT_OUT)/obj/APPS \ From b1fb2173e6c59b9dc1200944a65e2befc5a0337d Mon Sep 17 00:00:00 2001 
From: Steve Kondik Date: Fri, 7 Dec 2012 00:21:36 -0800 Subject: [PATCH 024/309] build: Add board platforms to qcom_utils Change-Id: I85458167bf2b9c04b029d09b87bf791ff4cfd04d build: Use common name for QCOM 7K boards. This is needed to build QCOM HAL without external modifications. The device would now have to specify msm7x27 or msm7x30 instead of specifying the full name. Patch Set 2: Add msm7x27a into the list. Change-Id: I25018e397b5aad27fab5244731a574ae86752e17 Add new version to qcom_utils.mk Change-Id: I1ca1532fd4968450715ecd0cca7729e0e6d507ef Build: update qcom_utils.mk Add build macro to determine Android Release Adding platform sdk version 18 for JB MR2. Change-Id: I79aa831ddd335bd14bd777506c210acc5d9960f1 --- core/qcom_utils.mk | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/core/qcom_utils.mk b/core/qcom_utils.mk index 76cff81ad..71ba3ad40 100755 --- a/core/qcom_utils.mk +++ b/core/qcom_utils.mk @@ -1,3 +1,20 @@ +# Board platforms lists to be used for +# TARGET_BOARD_PLATFORM specific featurization +QCOM_BOARD_PLATFORMS := msm7x27 +QCOM_BOARD_PLATFORMS += msm7x27a +QCOM_BOARD_PLATFORMS += msm7x30 +QCOM_BOARD_PLATFORMS += msm8660 +QCOM_BOARD_PLATFORMS += msm8960 +QCOM_BOARD_PLATFORMS += msm8974 + +MSM7K_BOARD_PLATFORMS := msm7x30 +MSM7K_BOARD_PLATFORMS += msm7x27 +MSM7K_BOARD_PLATFORMS += msm7x27a +MSM7K_BOARD_PLATFORMS += msm7k + +QSD8K_BOARD_PLATFORMS := qsd8k + + # vars for use by utils empty := space := $(empty) $(empty) @@ -154,7 +171,17 @@ FROYO_SDK_VERSIONS := 8 GINGERBREAD_SDK_VERSIONS := 9 10 HONEYCOMB_SDK_VERSIONS := 11 12 13 ICECREAM_SANDWICH_SDK_VERSIONS := 14 15 -JELLY_BEAN_SDK_VERSIONS := 16 +JELLY_BEAN_SDK_VERSIONS := 16 17 18 + +# $(call is-platform-sdk-version-at-least,version) +# version is a numeric SDK_VERSION defined above +define is-platform-sdk-version-at-least +$(strip \ + $(if $(filter 1,$(shell echo "$$(( $(PLATFORM_SDK_VERSION) >= $(1) ))" )), \ + true, \ + ) \ +) +endef # $(call 
is-android-codename,codename) # codename is one of cupcake,donut,eclair,froyo,gingerbread,icecream From 440f5ecc49782903f5379fe5675691dbd62c5262 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Sun, 23 Dec 2012 09:21:46 +0530 Subject: [PATCH 025/309] bacon: get rid of squisher Change-Id: I3cece868e56f85bfa0b31a62a51e6eab2070ab06 --- core/Makefile | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/core/Makefile b/core/Makefile index 5075d7129..20c63dff9 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1676,16 +1676,17 @@ $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) $(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \ $(BUILT_TARGET_FILES_PACKAGE) $@ +SLIM_TARGET_PACKAGE := $(PRODUCT_OUT)/Slim-$(SLIM_VERSION).zip + .PHONY: otapackage bacon otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) bacon: otapackage ifneq ($(TARGET_CUSTOM_RELEASETOOL),) - @echo -e ${CL_YLW}"Running custom releasetool..."${CL_RST} - $(hide) OTAPACKAGE=$(PWD)/$(INTERNAL_OTA_PACKAGE_TARGET) $(TARGET_CUSTOM_RELEASETOOL) -else - @echo -e ${CL_YLW}"Running releasetool..."${CL_RST} - $(hide) OTAPACKAGE=$(PWD)/$(INTERNAL_OTA_PACKAGE_TARGET) ./vendor/cm/tools/squisher + $(error TARGET_CUSTOM_RELEASETOOL is deprecated) endif + $(hide) ln -f $(INTERNAL_OTA_PACKAGE_TARGET) $(SLIM_TARGET_PACKAGE) + $(hide) $(MD5SUM) $(SLIM_TARGET_PACKAGE) > $(SLIM_TARGET_PACKAGE).md5sum + @echo -e ${CL_CYN}"Package Complete: $(SLIM_TARGET_PACKAGE)"${CL_RST} endif # recovery_fstab is defined endif # TARGET_NO_KERNEL != true From 56a53e1527e2d372acb71b8d76ba43a38b5fb38d Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Sat, 16 Mar 2013 20:00:17 +0530 Subject: [PATCH 026/309] envsetup: include bash completion script from vendor/slim Change-Id: Id90d84748df41d6626dfd46befabe0054c6d14e7 --- envsetup.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index f1a3e70db..419b1a3ad 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -285,13 
+285,15 @@ function addcompletions() return fi - dir="sdk/bash_completion" + dirs="sdk/bash_completion vendor/slim/bash_completion" + for dir in $dirs; do if [ -d ${dir} ]; then for f in `/bin/ls ${dir}/[a-z]*.bash 2> /dev/null`; do echo "including $f" . $f done fi + done } function choosetype() From 6c0112933dd5dc6862333a65d329bf2526cb6f18 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Mon, 25 Mar 2013 12:02:12 +0530 Subject: [PATCH 027/309] print SLIM_VERSION in build config Change-Id: Iead654e30798a11e636ec743683f3431d14a858c --- core/dumpvar.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/core/dumpvar.mk b/core/dumpvar.mk index 510bc7d08..b1810c595 100644 --- a/core/dumpvar.mk +++ b/core/dumpvar.mk @@ -67,6 +67,7 @@ HOST_OS_EXTRA:=$(shell python -c "import platform; print(platform.platform())") $(info ============================================) $(info PLATFORM_VERSION_CODENAME=$(PLATFORM_VERSION_CODENAME)) $(info PLATFORM_VERSION=$(PLATFORM_VERSION)) +$(info SLIM_VERSION=$(SLIM_VERSION)) $(info TARGET_PRODUCT=$(TARGET_PRODUCT)) $(info TARGET_BUILD_VARIANT=$(TARGET_BUILD_VARIANT)) $(info TARGET_BUILD_TYPE=$(TARGET_BUILD_TYPE)) From 3a4800f94fed20fabe06d704e42d4de9f3354533 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sat, 13 Apr 2013 07:19:52 -0700 Subject: [PATCH 028/309] eat: Eat using adb sideload * Requires new recovery and updated adbd Change-Id: I65990822ab63313701d74783d95611d128d90f94 --- envsetup.sh | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 419b1a3ad..eb0f1ee8b 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -723,28 +723,16 @@ function eat() adb root sleep 1 adb wait-for-device - SZ=`stat -c %s $ZIPPATH` - CACHESIZE=`adb shell busybox df -PB1 /cache | grep /cache | tr -s ' ' | cut -d ' ' -f 4` - if [ $CACHESIZE -gt $SZ ]; - then - PUSHDIR=/cache/ - DIR=cache - else - PUSHDIR=/storage/sdcard0/ - # Optional path for sdcard0 in recovery - [ -z "$1" ] && 
DIR=sdcard/0 || DIR=$1 - fi - echo "Pushing $ZIPFILE to $PUSHDIR" - if adb push $ZIPPATH $PUSHDIR ; then - cat << EOF > /tmp/command ---update_package=/$DIR/$ZIPFILE + cat << EOF > /tmp/command +--sideload EOF - if adb push /tmp/command /cache/recovery/ ; then - echo "Rebooting into recovery for installation" - adb reboot recovery - fi - rm /tmp/command + if adb push /tmp/command /cache/recovery/ ; then + echo "Rebooting into recovery for sideload installation" + adb reboot recovery + adb wait-for-sideload + adb sideload $ZIPPATH fi + rm /tmp/command else echo "Nothing to eat" return 1 From 677dc34c0887da1497e82c1fcf7a4984a19ac3ce Mon Sep 17 00:00:00 2001 From: nebkat Date: Fri, 28 Dec 2012 10:40:45 +0000 Subject: [PATCH 029/309] cout: move to $out Change-Id: I3b5bf8a5f62dcdf267662bb97a2073150152cfa6 --- envsetup.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index eb0f1ee8b..1ef689259 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -4,6 +4,7 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - lunch: lunch - - tapas: tapas [ ...] [arm|x86|mips|armv5|arm64|x86_64|mips64] [eng|userdebug|user] - croot: Changes directory to the top of the tree. +- cout: Changes directory to out. - m: Makes from the top of the tree. - mm: Builds all of the modules in the current directory, but not their dependencies. - mmm: Builds all of the modules in the supplied directories, but not their dependencies. @@ -1014,6 +1015,15 @@ function croot() fi } +function cout() +{ + if [ "$OUT" ]; then + cd $OUT + else + echo "Couldn't locate out directory. Try setting OUT." + fi +} + function cproj() { TOPFILE=build/core/envsetup.mk From f6798f19dbd5146736eee8c25ccf13750a239bf4 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Sat, 4 May 2013 17:59:18 +0530 Subject: [PATCH 030/309] eat: get the proper MODVERSION * The version in system/build.prop might be different than what the zip is named on incremental builds. 
* Use SLIM_VERSION from the build system instead. Change-Id: If4508921d8fd05219a2f358a2397a13b247065bd Signed-off-by: Chirayu Desai --- envsetup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 1ef689259..52b125394 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -704,8 +704,8 @@ function tapas() function eat() { if [ "$OUT" ] ; then - MODVERSION=`sed -n -e'/ro\.cm\.version/s/.*=//p' $OUT/system/build.prop` - ZIPFILE=cm-$MODVERSION.zip + MODVERSION=$(get_build_var SLIM_VERSION) + ZIPFILE=slim-$MODVERSION.zip ZIPPATH=$OUT/$ZIPFILE if [ ! -f $ZIPPATH ] ; then echo "Nothing to eat" From a73fbb79d5c8cbe598990e56b8e6a91521b3c39f Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Wed, 1 May 2013 15:48:08 +0530 Subject: [PATCH 031/309] envsetup: use $(CURDIR) for getting current directory Change-Id: I5f00faf64ec31d86dd2e48ec038748ce8499380b --- core/envsetup.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/envsetup.mk b/core/envsetup.mk index eb19f4fba..08524b309 100644 --- a/core/envsetup.mk +++ b/core/envsetup.mk @@ -210,7 +210,7 @@ ifeq (,$(strip $(OUT_DIR_COMMON_BASE))) ifneq ($(TOPDIR),) OUT_DIR := $(TOPDIR)out else -OUT_DIR := $(shell python -c 'import os,sys; print os.path.realpath(sys.argv[1])' .)/out +OUT_DIR := $(CURDIR)/out endif else OUT_DIR := $(OUT_DIR_COMMON_BASE)/$(notdir $(PWD)) From c623485c649587c74bf815fddaf72ad69cabba99 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Wed, 5 Jun 2013 20:14:33 +0530 Subject: [PATCH 032/309] Quashed Commit of repopick repopick: initial commit Change-Id: Ie42c11d335da07f6d164a0fcb887011e5fd6dcf4 repopick: add -b shortcut option * useful for people like me who want to pick into temp branch so that the picks survive a repo sync * shortcut to "--start-branch auto --abandon-first --ignore-missing" Change-Id: Ia4d4309d27e46a15ff4b74525668d974f4251dcb repopick: handle additional error cases around subprocesses * gracefully error if the project path 
cannot be found * poll() was not a reliable determination of EOF on my Darwin-Python 2.7 system. Change-Id: I203c2a75820f8acc079a5c9751d1c04daf6f3a16 repopick: allow running from a subdir of ANDROID_BUILD_TOP Change-Id: I6dfcd5dfe700174ed87dc8627b23519c62b4cb27 envsetup: hmm repopick Change-Id: I483cb36721f31dbf1f67e46cbe8a306b2a9e2c15 Conflicts: envsetup.sh repopick: decode the result as UTF-8 Change-Id: I51f2defa861c86b51baa7bad1df1d9666b952160 repopick: gracefully handle empty/non-JSON server responses Change-Id: Idbabdbfb4a910de0ad405f02b2a84cf2bc9ef3dc repopick: remove the superfluous information from date * '2011-11-17 06:54:52.000000000' -> '2011-11-17 06:54:52' Change-Id: I73d37c9aba13d4be6b4d2d2fc0f2f83971a3e77b repopick: let's be nice to our servers :) * Prefer fetching changes from GitHub, and if that fails, silently fall-back to Gerrit. Change-Id: Ibf68d4b69a7e8dbee2adb8f7f4250340b8be629c repopick: skip a cherry pick if its already been merged * override this behavior with the -f argument Change-Id: I280b4945344e7483a192d1c9769c42b8fd2a2aef RepoPick : Add support for git pull Allow user to git pull patchsets and dependencies. Change-Id: If5520b45fe79836eac628b3caf0526f11f8953d9 (cherry picked from commit df646304bdcef329e3fe7c12b68107de1f4cd42a) repopick: allow specifying a topic to pick all commits from Change-Id: I4fb60120794a77986bf641de063a8d41f4f45a23 repopick: support specifying a range of commits to pick * for example: repopick 12345-12350 Change-Id: I21dd0ef5f19d7b3899fa40ec4c28a0e9dba8a4df repopick: handle variant hal branching Change-Id: Ib0dee19abc13a7fb8d8e42c09b73c1b4f35ca938 Allow repopick to cherry-pick a specific patch set Use 'repopick 123456/9' where '123456' is the change number and '9' is the desired patchset. Change-Id: I2d9f6939fbde50b2a6057b75d2e7f722be5a3e21 repopick: Catch errors on url load failure Exit gracefully if server cannot be reached or if it reports an error. 
Change-Id: I86a1a45d3a1f8dfdb49a0400cb728c965dbad8df repopick: Allow using a custom query Change-Id: I87e92d3cbfa35367d87f55c92a6d12a6d4282232 repopick: Allow the github fetch to fail * This is optional and done to save gerrit server bandwidth, however it may fail in cases where the 'github' remote is a mirror which doesn't sync the changes. * Let it try fetching from gerrit if fetching from github fails. Change-Id: I6d183ff83572d817d78633280d8b20e3efdaf8f0 --- envsetup.sh | 6 + tools/repopick.py | 414 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 420 insertions(+) create mode 100755 tools/repopick.py diff --git a/envsetup.sh b/envsetup.sh index 52b125394..f037e99f1 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -16,6 +16,7 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - cgrep: Greps on all local C/C++ files. - ggrep: Greps on all local Gradle files. - jgrep: Greps on all local Java files. +- repopick: Utility to fetch changes from Gerrit. - resgrep: Greps on all local res/*.xml files. - mangrep: Greps on all local AndroidManifest.xml files. - sepgrep: Greps on all local sepolicy files. @@ -1579,6 +1580,11 @@ function godir () { \cd $T/$pathname } +function repopick() { + T=$(gettop) + $T/build/tools/repopick.py $@ +} + # Force JAVA_HOME to point to java 1.7 if it isn't already set. # # Note that the MacOS path for java 1.7 includes a minor revision number (sigh). diff --git a/tools/repopick.py b/tools/repopick.py new file mode 100755 index 000000000..d14589694 --- /dev/null +++ b/tools/repopick.py @@ -0,0 +1,414 @@ +#!/usr/bin/env python +# +# Copyright (C) 2013-14 The CyanogenMod Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Run repopick.py -h for a description of this utility. +# + +from __future__ import print_function + +import sys +import json +import os +import subprocess +import re +import argparse +import textwrap + +try: + # For python3 + import urllib.error + import urllib.request +except ImportError: + # For python2 + import imp + import urllib2 + urllib = imp.new_module('urllib') + urllib.error = urllib2 + urllib.request = urllib2 + +# Parse the command line +parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\ + repopick.py is a utility to simplify the process of cherry picking + patches from SlimRoms Gerrit instance. + + Given a list of change numbers, repopick will cd into the project path + and cherry pick the latest patch available. + + With the --start-branch argument, the user can specify that a branch + should be created before cherry picking. This is useful for + cherry-picking many patches into a common branch which can be easily + abandoned later (good for testing other's changes.) + + The --abandon-first argument, when used in conjuction with the + --start-branch option, will cause repopick to abandon the specified + branch in all repos first before performing any cherry picks.''')) +parser.add_argument('change_number', nargs='*', help='change number to cherry pick. 
Use {change number}/{patchset number} to get a specific revision.') +parser.add_argument('-i', '--ignore-missing', action='store_true', help='do not error out if a patch applies to a missing directory') +parser.add_argument('-s', '--start-branch', nargs=1, help='start the specified branch before cherry picking') +parser.add_argument('-a', '--abandon-first', action='store_true', help='before cherry picking, abandon the branch specified in --start-branch') +parser.add_argument('-b', '--auto-branch', action='store_true', help='shortcut to "--start-branch auto --abandon-first --ignore-missing"') +parser.add_argument('-q', '--quiet', action='store_true', help='print as little as possible') +parser.add_argument('-v', '--verbose', action='store_true', help='print extra information to aid in debug') +parser.add_argument('-f', '--force', action='store_true', help='force cherry pick even if commit has been merged') +parser.add_argument('-p', '--pull', action='store_true', help='execute pull instead of cherry-pick') +parser.add_argument('-t', '--topic', help='pick all commits from a specified topic') +parser.add_argument('-Q', '--query', help='pick all commits using the specified query') +args = parser.parse_args() +if args.start_branch == None and args.abandon_first: + parser.error('if --abandon-first is set, you must also give the branch name with --start-branch') +if args.auto_branch: + args.abandon_first = True + args.ignore_missing = True + if not args.start_branch: + args.start_branch = ['auto'] +if args.quiet and args.verbose: + parser.error('--quiet and --verbose cannot be specified together') +if len(args.change_number) > 0: + if args.topic or args.query: + parser.error('cannot specify a topic (or query) and change number(s) together') +if args.topic and args.query: + parser.error('cannot specify a topic and a query together') +if len(args.change_number) == 0 and not args.topic and not args.query: + parser.error('must specify at least one commit id or a topic or a 
query') + +# Helper function to determine whether a path is an executable file +def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + +# Implementation of Unix 'which' in Python +# +# From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python +def which(program): + fpath, fname = os.path.split(program) + if fpath: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + exe_file = os.path.join(path, program) + if is_exe(exe_file): + return exe_file + + return None + +# Simple wrapper for os.system() that: +# - exits on error if !can_fail +# - prints out the command if --verbose +# - suppresses all output if --quiet +def execute_cmd(cmd, can_fail=False): + if args.verbose: + print('Executing: %s' % cmd) + if args.quiet: + cmd = cmd.replace(' && ', ' &> /dev/null && ') + cmd = cmd + " &> /dev/null" + if os.system(cmd): + if not args.verbose: + print('\nCommand that failed:\n%s' % cmd) + if not can_fail: + sys.exit(1) + +# Verifies whether pathA is a subdirectory (or the same) as pathB +def is_pathA_subdir_of_pathB(pathA, pathB): + pathA = os.path.realpath(pathA) + '/' + pathB = os.path.realpath(pathB) + '/' + return(pathB == pathA[:len(pathB)]) + +# Find the necessary bins - repo +repo_bin = which('repo') +if repo_bin == None: + repo_bin = os.path.join(os.environ["HOME"], 'repo') + if not is_exe(repo_bin): + sys.stderr.write('ERROR: Could not find the repo program in either $PATH or $HOME/bin\n') + sys.exit(1) + +# Find the necessary bins - git +git_bin = which('git') +if not is_exe(git_bin): + sys.stderr.write('ERROR: Could not find the git program in $PATH\n') + sys.exit(1) + +# Change current directory to the top of the tree +if 'ANDROID_BUILD_TOP' in os.environ: + top = os.environ['ANDROID_BUILD_TOP'] + if not is_pathA_subdir_of_pathB(os.getcwd(), top): + sys.stderr.write('ERROR: You must run this tool from within $ANDROID_BUILD_TOP!\n') + 
sys.exit(1) + os.chdir(os.environ['ANDROID_BUILD_TOP']) + +# Sanity check that we are being run from the top level of the tree +if not os.path.isdir('.repo'): + sys.stderr.write('ERROR: No .repo directory found. Please run this from the top of your tree.\n') + sys.exit(1) + +# If --abandon-first is given, abandon the branch before starting +if args.abandon_first: + # Determine if the branch already exists; skip the abandon if it does not + plist = subprocess.Popen([repo_bin,"info"], stdout=subprocess.PIPE) + needs_abandon = False + while(True): + pline = plist.stdout.readline().rstrip() + if not pline: + break + matchObj = re.match(r'Local Branches.*\[(.*)\]', pline.decode()) + if matchObj: + local_branches = re.split('\s*,\s*', matchObj.group(1)) + if any(args.start_branch[0] in s for s in local_branches): + needs_abandon = True + + if needs_abandon: + # Perform the abandon only if the branch already exists + if not args.quiet: + print('Abandoning branch: %s' % args.start_branch[0]) + cmd = '%s abandon %s' % (repo_bin, args.start_branch[0]) + execute_cmd(cmd) + if not args.quiet: + print('') + +# Get the list of projects that repo knows about +# - convert the project name to a project path +project_name_to_path = {} +plist = subprocess.Popen([repo_bin,"list"], stdout=subprocess.PIPE) +project_path = None +while(True): + pline = plist.stdout.readline().rstrip() + if not pline: + break + ppaths = re.split('\s*:\s*', pline.decode()) + project_name_to_path[ppaths[1]] = ppaths[0] + +# Get all commits for a specified query +def fetch_query(query): + url = 'http://review.slimroms.eu/changes/?q=%s' % query + if args.verbose: + print('Fetching all commits using query: %s\n' % query) + f = urllib.request.urlopen(url) + d = f.read().decode("utf-8") + if args.verbose: + print('Result from request:\n' + d) + + # Clean up the result + d = d.split(')]}\'\n')[1] + matchObj = re.match(r'\[\s*\]', d) + if matchObj: + sys.stderr.write('ERROR: Query %s was not found on the server\n' 
% query) + sys.exit(1) + d = re.sub(r'\[(.*)\]', r'\1', d) + if args.verbose: + print('Result from request:\n' + d) + + data = json.loads(d) + changelist = [] + for c in xrange(0, len(data)): + changelist.append(data[c]['_number']) + + # Reverse the array as we want to pick the lowest one first + args.change_number = reversed(changelist) + +if args.topic: + fetch_query("topic:{0}".format(args.topic)) + +if args.query: + fetch_query(args.query) + +# Check for range of commits and rebuild array +changelist = [] +for change in args.change_number: + c=str(change) + if '-' in c: + templist = c.split('-') + for i in range(int(templist[0]), int(templist[1]) + 1): + changelist.append(str(i)) + else: + changelist.append(c) + +args.change_number = changelist + +# Iterate through the requested change numbers +for changeps in args.change_number: + + if '/' in changeps: + change = changeps.split('/')[0] + patchset = changeps.split('/')[1] + else: + change = changeps + patchset = '' + + if not args.quiet: + if len(patchset) == 0: + print('Applying change number %s ...' % change) + else: + print('Applying change number {change}/{patchset} ...'.format(change=change, patchset=patchset)) + + if len(patchset) == 0: + query_revision = 'CURRENT_REVISION' + else: + query_revision = 'ALL_REVISIONS' + + # Fetch information about the change from Gerrit's REST API + # + # gerrit returns two lines, a magic string and then valid JSON: + # )]}' + # [ ... valid JSON ... 
] + url = 'http://review.slimroms.eu/changes/?q={change}&o={query_revision}&o=CURRENT_COMMIT&pp=0'.format(change=change, query_revision=query_revision) + if args.verbose: + print('Fetching from: %s\n' % url) + try: + f = urllib.request.urlopen(url) + except urllib.error.URLError: + sys.stderr.write('ERROR: Server reported an error, or cannot be reached\n') + sys.exit(1) + d = f.read().decode("utf-8") + if args.verbose: + print('Result from request:\n' + d) + + # Clean up the result + d = d.split('\n')[1] + matchObj = re.match(r'\[\s*\]', d) + if matchObj: + sys.stderr.write('ERROR: Change number %s was not found on the server\n' % change) + sys.exit(1) + d = re.sub(r'\[(.*)\]', r'\1', d) + + # Parse the JSON + try: + data = json.loads(d) + except ValueError: + sys.stderr.write('ERROR: The response from the server could not be parsed properly\n') + if not args.verbose: + sys.stderr.write('The malformed response was: %s\n' % d) + sys.exit(1) + + # Extract information from the JSON response + date_fluff = '.000000000' + project_name = data['project'] + project_branch = data['branch'] + change_number = data['_number'] + status = data['status'] + patchsetfound = False + + if len(patchset) > 0: + try: + for revision in data['revisions']: + if (int(data['revisions'][revision]['_number']) == int(patchset)) and not patchsetfound: + target_revision = data['revisions'][revision] + if args.verbose: + print('Using found patch set {patchset} ...'.format(patchset=patchset)) + patchsetfound = True + break + if not patchsetfound: + print('ERROR: The patch set could not be found, using CURRENT_REVISION instead.') + except: + print('ERROR: The patch set could not be found, using CURRENT_REVISION instead.') + patchsetfound = False + + if not patchsetfound: + target_revision = data['revisions'][data['current_revision']] + + current_revision = data['revisions'][data['current_revision']] + + patch_number = target_revision['_number'] + fetch_url = target_revision['fetch']['anonymous 
http']['url'] + fetch_ref = target_revision['fetch']['anonymous http']['ref'] + author_name = current_revision['commit']['author']['name'] + author_email = current_revision['commit']['author']['email'] + author_date = current_revision['commit']['author']['date'].replace(date_fluff, '') + committer_name = current_revision['commit']['committer']['name'] + committer_email = current_revision['commit']['committer']['email'] + committer_date = current_revision['commit']['committer']['date'].replace(date_fluff, '') + subject = current_revision['commit']['subject'] + + # Check if commit has already been merged and exit if it has, unless -f is specified + if status == "MERGED": + if args.force: + print("!! Force-picking a merged commit !!\n") + else: + print("Commit already merged. Skipping the cherry pick.\nUse -f to force this pick.") + continue; + + # Convert the project name to a project path + # - check that the project path exists + if project_name in project_name_to_path: + project_path = project_name_to_path[project_name]; + + if project_path.startswith('hardware/qcom/'): + split_path = project_path.split('/') + # split_path[2] might be display or it might be display-caf, trim the -caf + split_path[2] = split_path[2].split('-')[0] + + # Need to treat hardware/qcom/{audio,display,media} specially + if split_path[2] == 'audio' or split_path[2] == 'display' or split_path[2] == 'media': + split_branch = project_branch.split('-') + + # display is extra special + if split_path[2] == 'display' and len(split_path) == 3: + project_path = '/'.join(split_path) + else: + project_path = '/'.join(split_path[:-1]) + + if len(split_branch) == 4 and split_branch[0] == 'cm' and split_branch[2] == 'caf': + project_path += '-caf/msm' + split_branch[3] + # audio and media are different from display + elif split_path[2] == 'audio' or split_path[2] == 'media': + project_path += '/default' + elif args.ignore_missing: + print('WARNING: Skipping %d since there is no project directory for: 
%s\n' % (change_number, project_name)) + continue; + else: + sys.stderr.write('ERROR: For %d, could not determine the project path for project %s\n' % (change_number, project_name)) + sys.exit(1) + + # If --start-branch is given, create the branch (more than once per path is okay; repo ignores gracefully) + if args.start_branch: + cmd = '%s start %s %s' % (repo_bin, args.start_branch[0], project_path) + execute_cmd(cmd) + + # Print out some useful info + if not args.quiet: + print('--> Subject: "%s"' % subject) + print('--> Project path: %s' % project_path) + print('--> Change number: %d (Patch Set %d)' % (change_number, patch_number)) + print('--> Author: %s <%s> %s' % (author_name, author_email, author_date)) + print('--> Committer: %s <%s> %s' % (committer_name, committer_email, committer_date)) + + # Try fetching from GitHub first + if args.verbose: + print('Trying to fetch the change from GitHub') + if args.pull: + cmd = 'cd %s && git pull --no-edit github %s' % (project_path, fetch_ref) + else: + cmd = 'cd %s && git fetch github %s' % (project_path, fetch_ref) + execute_cmd(cmd, True) + # Check if it worked + FETCH_HEAD = '%s/.git/FETCH_HEAD' % project_path + if os.stat(FETCH_HEAD).st_size == 0: + # That didn't work, fetch from Gerrit instead + if args.verbose: + print('Fetching from GitHub didn\'t work, trying to fetch the change from Gerrit') + if args.pull: + cmd = 'cd %s && git pull --no-edit %s %s' % (project_path, fetch_url, fetch_ref) + else: + cmd = 'cd %s && git fetch %s %s' % (project_path, fetch_url, fetch_ref) + execute_cmd(cmd) + # Perform the cherry-pick + cmd = 'cd %s && git cherry-pick FETCH_HEAD' % (project_path) + if not args.pull: + execute_cmd(cmd) + if not args.quiet: + print('') + From 911dde45a33728ca9407e2f4a35dfe4311410727 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Wed, 29 Apr 2015 21:56:57 +0100 Subject: [PATCH 033/309] repopick: fix for Slim our branch doesn't have a '-' in the base name Change-Id: 
I93e4e9619339ea88cd1c329f85c1e0d56974897c --- tools/repopick.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/repopick.py b/tools/repopick.py index d14589694..15566bb91 100755 --- a/tools/repopick.py +++ b/tools/repopick.py @@ -352,7 +352,7 @@ def fetch_query(query): split_path[2] = split_path[2].split('-')[0] # Need to treat hardware/qcom/{audio,display,media} specially - if split_path[2] == 'audio' or split_path[2] == 'display' or split_path[2] == 'media': + if split_path[2] in ('audio', 'display', 'media'): split_branch = project_branch.split('-') # display is extra special @@ -361,8 +361,8 @@ def fetch_query(query): else: project_path = '/'.join(split_path[:-1]) - if len(split_branch) == 4 and split_branch[0] == 'cm' and split_branch[2] == 'caf': - project_path += '-caf/msm' + split_branch[3] + if len(split_branch) != 1 and split_branch[0].startswith('lp') and split_branch[1] == 'caf': + project_path += '-caf/msm' + split_branch[2] # audio and media are different from display elif split_path[2] == 'audio' or split_path[2] == 'media': project_path += '/default' From 359c61911dd62fff805ddf4be62fbcee28c9eab0 Mon Sep 17 00:00:00 2001 From: cybojenix Date: Fri, 28 Jun 2013 20:30:00 +0100 Subject: [PATCH 034/309] lets make the lunch list look a little neater It's very simple, but with more and more devices coming in, a long list won't cut it There are probably better ways of doing this, feel free to comment Change-Id: I9bd1fef920f985a133c882df9987b6be29dbc7b8 --- envsetup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index f037e99f1..4de13229d 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -500,9 +500,9 @@ function print_lunch_menu() local choice for choice in ${LUNCH_MENU_CHOICES[@]} do - echo " $i. $choice" + echo " $i. $choice " i=$(($i+1)) - done + done | column if [ "z${SLIM_DEVICES_ONLY}" != "z" ]; then echo "... and don't forget the bacon!" 
From 8ffc5ba9089d3c683b5633dc61cc9b15559f3fa1 Mon Sep 17 00:00:00 2001 From: Shareef Ali Date: Tue, 28 May 2013 22:44:31 -0500 Subject: [PATCH 035/309] build: get rid of the sparse expand stuff * i don't see a reason why we are doing this because we never deal with flashing system img rawly * This will put useless write on ssd.. * we don't have space on ssd Change-Id: Icd53d161b8515f5eca238b98ad68515d69caa34f --- tools/releasetools/build_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py index 357a666fc..2917f11c0 100755 --- a/tools/releasetools/build_image.py +++ b/tools/releasetools/build_image.py @@ -263,7 +263,7 @@ def BuildImage(in_dir, prop_dict, out_file, target_out=None): build_command = ["mkuserimg.sh"] if "extfs_sparse_flag" in prop_dict: build_command.append(prop_dict["extfs_sparse_flag"]) - run_fsck = True + #run_fsck = True build_command.extend([in_dir, out_file, fs_type, prop_dict["mount_point"]]) build_command.append(prop_dict["partition_size"]) From 3c4340c929d54ac8c4772650a8d3efa29336fa5f Mon Sep 17 00:00:00 2001 From: Adnan Begovic Date: Tue, 6 Oct 2015 16:26:05 -0700 Subject: [PATCH 036/309] Add temporary hack to help with merge resolution. 
* Update for 6.0.1 Change-Id: I1207daf17c2bd3f7f18e35a7705635752535942f Signed-off-by: Josue Rivera --- .../check_target_files_signatures | 443 ++++++++++++++- .../check_target_files_signatures.py | 442 --------------- .../check_target_files_signatures.tmp | 1 + tools/releasetools/make_recovery_patch | 54 +- tools/releasetools/make_recovery_patch.py | 53 -- tools/releasetools/make_recovery_patch.tmp | 1 + tools/releasetools/sign_target_files_apks | 513 +++++++++++++++++- tools/releasetools/sign_target_files_apks.py | 512 ----------------- tools/releasetools/sign_target_files_apks.tmp | 1 + 9 files changed, 1010 insertions(+), 1010 deletions(-) mode change 120000 => 100755 tools/releasetools/check_target_files_signatures delete mode 100755 tools/releasetools/check_target_files_signatures.py create mode 120000 tools/releasetools/check_target_files_signatures.tmp mode change 120000 => 100755 tools/releasetools/make_recovery_patch delete mode 100755 tools/releasetools/make_recovery_patch.py create mode 120000 tools/releasetools/make_recovery_patch.tmp mode change 120000 => 100755 tools/releasetools/sign_target_files_apks delete mode 100755 tools/releasetools/sign_target_files_apks.py create mode 120000 tools/releasetools/sign_target_files_apks.tmp diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures deleted file mode 120000 index 9f62aa323..000000000 --- a/tools/releasetools/check_target_files_signatures +++ /dev/null @@ -1 +0,0 @@ -check_target_files_signatures.py \ No newline at end of file diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures new file mode 100755 index 000000000..5c541abc6 --- /dev/null +++ b/tools/releasetools/check_target_files_signatures @@ -0,0 +1,442 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Check the signatures of all APKs in a target_files .zip file. With +-c, compare the signatures of each package to the ones in a separate +target_files (usually a previously distributed build for the same +device) and flag any changes. + +Usage: check_target_file_signatures [flags] target_files + + -c (--compare_with) + Look for compatibility problems between the two sets of target + files (eg., packages whose keys have changed). + + -l (--local_cert_dirs) + Comma-separated list of top-level directories to scan for + .x509.pem files. Defaults to "vendor,build". Where cert files + can be found that match APK signatures, the filename will be + printed as the cert name, otherwise a hash of the cert plus its + subject string will be printed instead. + + -t (--text) + Dump the certificate information for both packages in comparison + mode (this output is normally suppressed). + +""" + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." + sys.exit(1) + +import os +import re +import shutil +import subprocess +import zipfile + +import common + +# Work around a bug in python's zipfile module that prevents opening +# of zipfiles if any entry has an extra field of between 1 and 3 bytes +# (which is common with zipaligned APKs). This overrides the +# ZipInfo._decodeExtra() method (which contains the bug) with an empty +# version (since we don't need to decode the extra field anyway). 
+class MyZipInfo(zipfile.ZipInfo): + def _decodeExtra(self): + pass +zipfile.ZipInfo = MyZipInfo + +OPTIONS = common.OPTIONS + +OPTIONS.text = False +OPTIONS.compare_with = None +OPTIONS.local_cert_dirs = ("vendor", "build") + +PROBLEMS = [] +PROBLEM_PREFIX = [] + +def AddProblem(msg): + PROBLEMS.append(" ".join(PROBLEM_PREFIX) + " " + msg) +def Push(msg): + PROBLEM_PREFIX.append(msg) +def Pop(): + PROBLEM_PREFIX.pop() + + +def Banner(msg): + print "-" * 70 + print " ", msg + print "-" * 70 + + +def GetCertSubject(cert): + p = common.Run(["openssl", "x509", "-inform", "DER", "-text"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = p.communicate(cert) + if err and not err.strip(): + return "(error reading cert subject)" + for line in out.split("\n"): + line = line.strip() + if line.startswith("Subject:"): + return line[8:].strip() + return "(unknown cert subject)" + + +class CertDB(object): + def __init__(self): + self.certs = {} + + def Add(self, cert, name=None): + if cert in self.certs: + if name: + self.certs[cert] = self.certs[cert] + "," + name + else: + if name is None: + name = "unknown cert %s (%s)" % (common.sha1(cert).hexdigest()[:12], + GetCertSubject(cert)) + self.certs[cert] = name + + def Get(self, cert): + """Return the name for a given cert.""" + return self.certs.get(cert, None) + + def FindLocalCerts(self): + to_load = [] + for top in OPTIONS.local_cert_dirs: + for dirpath, _, filenames in os.walk(top): + certs = [os.path.join(dirpath, i) + for i in filenames if i.endswith(".x509.pem")] + if certs: + to_load.extend(certs) + + for i in to_load: + f = open(i) + cert = common.ParseCertificate(f.read()) + f.close() + name, _ = os.path.splitext(i) + name, _ = os.path.splitext(name) + self.Add(cert, name) + +ALL_CERTS = CertDB() + + +def CertFromPKCS7(data, filename): + """Read the cert out of a PKCS#7-format file (which is what is + stored in a signed .apk).""" + Push(filename + ":") + try: + p = common.Run(["openssl", "pkcs7", + 
"-inform", "DER", + "-outform", "PEM", + "-print_certs"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = p.communicate(data) + if err and not err.strip(): + AddProblem("error reading cert:\n" + err) + return None + + cert = common.ParseCertificate(out) + if not cert: + AddProblem("error parsing cert output") + return None + return cert + finally: + Pop() + + +class APK(object): + def __init__(self, full_filename, filename): + self.filename = filename + self.certs = None + self.shared_uid = None + self.package = None + + Push(filename+":") + try: + self.RecordCerts(full_filename) + self.ReadManifest(full_filename) + finally: + Pop() + + def RecordCerts(self, full_filename): + out = set() + try: + f = open(full_filename) + apk = zipfile.ZipFile(f, "r") + pkcs7 = None + for info in apk.infolist(): + if info.filename.startswith("META-INF/") and \ + (info.filename.endswith(".DSA") or info.filename.endswith(".RSA")): + pkcs7 = apk.read(info.filename) + cert = CertFromPKCS7(pkcs7, info.filename) + out.add(cert) + ALL_CERTS.Add(cert) + if not pkcs7: + AddProblem("no signature") + finally: + f.close() + self.certs = frozenset(out) + + def ReadManifest(self, full_filename): + p = common.Run(["aapt", "dump", "xmltree", full_filename, + "AndroidManifest.xml"], + stdout=subprocess.PIPE) + manifest, err = p.communicate() + if err: + AddProblem("failed to read manifest") + return + + self.shared_uid = None + self.package = None + + for line in manifest.split("\n"): + line = line.strip() + m = re.search(r'A: (\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line) + if m: + name = m.group(1) + if name == "android:sharedUserId": + if self.shared_uid is not None: + AddProblem("multiple sharedUserId declarations") + self.shared_uid = m.group(2) + elif name == "package": + if self.package is not None: + AddProblem("multiple package declarations") + self.package = m.group(2) + + if self.package is None: + AddProblem("no package declaration") + + +class TargetFiles(object): + 
def __init__(self): + self.max_pkg_len = 30 + self.max_fn_len = 20 + self.apks = None + self.apks_by_basename = None + self.certmap = None + + def LoadZipFile(self, filename): + d, z = common.UnzipTemp(filename, '*.apk') + try: + self.apks = {} + self.apks_by_basename = {} + for dirpath, _, filenames in os.walk(d): + for fn in filenames: + if fn.endswith(".apk"): + fullname = os.path.join(dirpath, fn) + displayname = fullname[len(d)+1:] + apk = APK(fullname, displayname) + self.apks[apk.package] = apk + self.apks_by_basename[os.path.basename(apk.filename)] = apk + + self.max_pkg_len = max(self.max_pkg_len, len(apk.package)) + self.max_fn_len = max(self.max_fn_len, len(apk.filename)) + finally: + shutil.rmtree(d) + + self.certmap = common.ReadApkCerts(z) + z.close() + + def CheckSharedUids(self): + """Look for any instances where packages signed with different + certs request the same sharedUserId.""" + apks_by_uid = {} + for apk in self.apks.itervalues(): + if apk.shared_uid: + apks_by_uid.setdefault(apk.shared_uid, []).append(apk) + + for uid in sorted(apks_by_uid.keys()): + apks = apks_by_uid[uid] + for apk in apks[1:]: + if apk.certs != apks[0].certs: + break + else: + # all packages have the same set of certs; this uid is fine. + continue + + AddProblem("different cert sets for packages with uid %s" % (uid,)) + + print "uid %s is shared by packages with different cert sets:" % (uid,) + for apk in apks: + print "%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename) + for cert in apk.certs: + print " ", ALL_CERTS.Get(cert) + print + + def CheckExternalSignatures(self): + for apk_filename, certname in self.certmap.iteritems(): + if certname == "EXTERNAL": + # Apps marked EXTERNAL should be signed with the test key + # during development, then manually re-signed after + # predexopting. Consider it an error if this app is now + # signed with any key that is present in our tree. 
+ apk = self.apks_by_basename[apk_filename] + name = ALL_CERTS.Get(apk.cert) + if not name.startswith("unknown "): + Push(apk.filename) + AddProblem("hasn't been signed with EXTERNAL cert") + Pop() + + def PrintCerts(self): + """Display a table of packages grouped by cert.""" + by_cert = {} + for apk in self.apks.itervalues(): + for cert in apk.certs: + by_cert.setdefault(cert, []).append((apk.package, apk)) + + order = [(-len(v), k) for (k, v) in by_cert.iteritems()] + order.sort() + + for _, cert in order: + print "%s:" % (ALL_CERTS.Get(cert),) + apks = by_cert[cert] + apks.sort() + for _, apk in apks: + if apk.shared_uid: + print " %-*s %-*s [%s]" % (self.max_fn_len, apk.filename, + self.max_pkg_len, apk.package, + apk.shared_uid) + else: + print " %-*s %-*s" % (self.max_fn_len, apk.filename, + self.max_pkg_len, apk.package) + print + + def CompareWith(self, other): + """Look for instances where a given package that exists in both + self and other have different certs.""" + + all_apks = set(self.apks.keys()) + all_apks.update(other.apks.keys()) + + max_pkg_len = max(self.max_pkg_len, other.max_pkg_len) + + by_certpair = {} + + for i in all_apks: + if i in self.apks: + if i in other.apks: + # in both; should have same set of certs + if self.apks[i].certs != other.apks[i].certs: + by_certpair.setdefault((other.apks[i].certs, + self.apks[i].certs), []).append(i) + else: + print "%s [%s]: new APK (not in comparison target_files)" % ( + i, self.apks[i].filename) + else: + if i in other.apks: + print "%s [%s]: removed APK (only in comparison target_files)" % ( + i, other.apks[i].filename) + + if by_certpair: + AddProblem("some APKs changed certs") + Banner("APK signing differences") + for (old, new), packages in sorted(by_certpair.items()): + for i, o in enumerate(old): + if i == 0: + print "was", ALL_CERTS.Get(o) + else: + print " ", ALL_CERTS.Get(o) + for i, n in enumerate(new): + if i == 0: + print "now", ALL_CERTS.Get(n) + else: + print " ", ALL_CERTS.Get(n) + for 
i in sorted(packages): + old_fn = other.apks[i].filename + new_fn = self.apks[i].filename + if old_fn == new_fn: + print " %-*s [%s]" % (max_pkg_len, i, old_fn) + else: + print " %-*s [was: %s; now: %s]" % (max_pkg_len, i, + old_fn, new_fn) + print + + +def main(argv): + def option_handler(o, a): + if o in ("-c", "--compare_with"): + OPTIONS.compare_with = a + elif o in ("-l", "--local_cert_dirs"): + OPTIONS.local_cert_dirs = [i.strip() for i in a.split(",")] + elif o in ("-t", "--text"): + OPTIONS.text = True + else: + return False + return True + + args = common.ParseOptions(argv, __doc__, + extra_opts="c:l:t", + extra_long_opts=["compare_with=", + "local_cert_dirs="], + extra_option_handler=option_handler) + + if len(args) != 1: + common.Usage(__doc__) + sys.exit(1) + + ALL_CERTS.FindLocalCerts() + + Push("input target_files:") + try: + target_files = TargetFiles() + target_files.LoadZipFile(args[0]) + finally: + Pop() + + compare_files = None + if OPTIONS.compare_with: + Push("comparison target_files:") + try: + compare_files = TargetFiles() + compare_files.LoadZipFile(OPTIONS.compare_with) + finally: + Pop() + + if OPTIONS.text or not compare_files: + Banner("target files") + target_files.PrintCerts() + target_files.CheckSharedUids() + target_files.CheckExternalSignatures() + if compare_files: + if OPTIONS.text: + Banner("comparison files") + compare_files.PrintCerts() + target_files.CompareWith(compare_files) + + if PROBLEMS: + print "%d problem(s) found:\n" % (len(PROBLEMS),) + for p in PROBLEMS: + print p + return 1 + + return 0 + + +if __name__ == '__main__': + try: + r = main(sys.argv[1:]) + sys.exit(r) + except common.ExternalError as e: + print + print " ERROR: %s" % (e,) + print + sys.exit(1) diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py deleted file mode 100755 index 5c541abc6..000000000 --- a/tools/releasetools/check_target_files_signatures.py +++ /dev/null @@ -1,442 +0,0 @@ 
-#!/usr/bin/env python -# -# Copyright (C) 2009 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Check the signatures of all APKs in a target_files .zip file. With --c, compare the signatures of each package to the ones in a separate -target_files (usually a previously distributed build for the same -device) and flag any changes. - -Usage: check_target_file_signatures [flags] target_files - - -c (--compare_with) - Look for compatibility problems between the two sets of target - files (eg., packages whose keys have changed). - - -l (--local_cert_dirs) - Comma-separated list of top-level directories to scan for - .x509.pem files. Defaults to "vendor,build". Where cert files - can be found that match APK signatures, the filename will be - printed as the cert name, otherwise a hash of the cert plus its - subject string will be printed instead. - - -t (--text) - Dump the certificate information for both packages in comparison - mode (this output is normally suppressed). - -""" - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." - sys.exit(1) - -import os -import re -import shutil -import subprocess -import zipfile - -import common - -# Work around a bug in python's zipfile module that prevents opening -# of zipfiles if any entry has an extra field of between 1 and 3 bytes -# (which is common with zipaligned APKs). 
This overrides the -# ZipInfo._decodeExtra() method (which contains the bug) with an empty -# version (since we don't need to decode the extra field anyway). -class MyZipInfo(zipfile.ZipInfo): - def _decodeExtra(self): - pass -zipfile.ZipInfo = MyZipInfo - -OPTIONS = common.OPTIONS - -OPTIONS.text = False -OPTIONS.compare_with = None -OPTIONS.local_cert_dirs = ("vendor", "build") - -PROBLEMS = [] -PROBLEM_PREFIX = [] - -def AddProblem(msg): - PROBLEMS.append(" ".join(PROBLEM_PREFIX) + " " + msg) -def Push(msg): - PROBLEM_PREFIX.append(msg) -def Pop(): - PROBLEM_PREFIX.pop() - - -def Banner(msg): - print "-" * 70 - print " ", msg - print "-" * 70 - - -def GetCertSubject(cert): - p = common.Run(["openssl", "x509", "-inform", "DER", "-text"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - out, err = p.communicate(cert) - if err and not err.strip(): - return "(error reading cert subject)" - for line in out.split("\n"): - line = line.strip() - if line.startswith("Subject:"): - return line[8:].strip() - return "(unknown cert subject)" - - -class CertDB(object): - def __init__(self): - self.certs = {} - - def Add(self, cert, name=None): - if cert in self.certs: - if name: - self.certs[cert] = self.certs[cert] + "," + name - else: - if name is None: - name = "unknown cert %s (%s)" % (common.sha1(cert).hexdigest()[:12], - GetCertSubject(cert)) - self.certs[cert] = name - - def Get(self, cert): - """Return the name for a given cert.""" - return self.certs.get(cert, None) - - def FindLocalCerts(self): - to_load = [] - for top in OPTIONS.local_cert_dirs: - for dirpath, _, filenames in os.walk(top): - certs = [os.path.join(dirpath, i) - for i in filenames if i.endswith(".x509.pem")] - if certs: - to_load.extend(certs) - - for i in to_load: - f = open(i) - cert = common.ParseCertificate(f.read()) - f.close() - name, _ = os.path.splitext(i) - name, _ = os.path.splitext(name) - self.Add(cert, name) - -ALL_CERTS = CertDB() - - -def CertFromPKCS7(data, filename): - """Read the 
cert out of a PKCS#7-format file (which is what is - stored in a signed .apk).""" - Push(filename + ":") - try: - p = common.Run(["openssl", "pkcs7", - "-inform", "DER", - "-outform", "PEM", - "-print_certs"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - out, err = p.communicate(data) - if err and not err.strip(): - AddProblem("error reading cert:\n" + err) - return None - - cert = common.ParseCertificate(out) - if not cert: - AddProblem("error parsing cert output") - return None - return cert - finally: - Pop() - - -class APK(object): - def __init__(self, full_filename, filename): - self.filename = filename - self.certs = None - self.shared_uid = None - self.package = None - - Push(filename+":") - try: - self.RecordCerts(full_filename) - self.ReadManifest(full_filename) - finally: - Pop() - - def RecordCerts(self, full_filename): - out = set() - try: - f = open(full_filename) - apk = zipfile.ZipFile(f, "r") - pkcs7 = None - for info in apk.infolist(): - if info.filename.startswith("META-INF/") and \ - (info.filename.endswith(".DSA") or info.filename.endswith(".RSA")): - pkcs7 = apk.read(info.filename) - cert = CertFromPKCS7(pkcs7, info.filename) - out.add(cert) - ALL_CERTS.Add(cert) - if not pkcs7: - AddProblem("no signature") - finally: - f.close() - self.certs = frozenset(out) - - def ReadManifest(self, full_filename): - p = common.Run(["aapt", "dump", "xmltree", full_filename, - "AndroidManifest.xml"], - stdout=subprocess.PIPE) - manifest, err = p.communicate() - if err: - AddProblem("failed to read manifest") - return - - self.shared_uid = None - self.package = None - - for line in manifest.split("\n"): - line = line.strip() - m = re.search(r'A: (\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line) - if m: - name = m.group(1) - if name == "android:sharedUserId": - if self.shared_uid is not None: - AddProblem("multiple sharedUserId declarations") - self.shared_uid = m.group(2) - elif name == "package": - if self.package is not None: - AddProblem("multiple 
package declarations") - self.package = m.group(2) - - if self.package is None: - AddProblem("no package declaration") - - -class TargetFiles(object): - def __init__(self): - self.max_pkg_len = 30 - self.max_fn_len = 20 - self.apks = None - self.apks_by_basename = None - self.certmap = None - - def LoadZipFile(self, filename): - d, z = common.UnzipTemp(filename, '*.apk') - try: - self.apks = {} - self.apks_by_basename = {} - for dirpath, _, filenames in os.walk(d): - for fn in filenames: - if fn.endswith(".apk"): - fullname = os.path.join(dirpath, fn) - displayname = fullname[len(d)+1:] - apk = APK(fullname, displayname) - self.apks[apk.package] = apk - self.apks_by_basename[os.path.basename(apk.filename)] = apk - - self.max_pkg_len = max(self.max_pkg_len, len(apk.package)) - self.max_fn_len = max(self.max_fn_len, len(apk.filename)) - finally: - shutil.rmtree(d) - - self.certmap = common.ReadApkCerts(z) - z.close() - - def CheckSharedUids(self): - """Look for any instances where packages signed with different - certs request the same sharedUserId.""" - apks_by_uid = {} - for apk in self.apks.itervalues(): - if apk.shared_uid: - apks_by_uid.setdefault(apk.shared_uid, []).append(apk) - - for uid in sorted(apks_by_uid.keys()): - apks = apks_by_uid[uid] - for apk in apks[1:]: - if apk.certs != apks[0].certs: - break - else: - # all packages have the same set of certs; this uid is fine. - continue - - AddProblem("different cert sets for packages with uid %s" % (uid,)) - - print "uid %s is shared by packages with different cert sets:" % (uid,) - for apk in apks: - print "%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename) - for cert in apk.certs: - print " ", ALL_CERTS.Get(cert) - print - - def CheckExternalSignatures(self): - for apk_filename, certname in self.certmap.iteritems(): - if certname == "EXTERNAL": - # Apps marked EXTERNAL should be signed with the test key - # during development, then manually re-signed after - # predexopting. 
Consider it an error if this app is now - # signed with any key that is present in our tree. - apk = self.apks_by_basename[apk_filename] - name = ALL_CERTS.Get(apk.cert) - if not name.startswith("unknown "): - Push(apk.filename) - AddProblem("hasn't been signed with EXTERNAL cert") - Pop() - - def PrintCerts(self): - """Display a table of packages grouped by cert.""" - by_cert = {} - for apk in self.apks.itervalues(): - for cert in apk.certs: - by_cert.setdefault(cert, []).append((apk.package, apk)) - - order = [(-len(v), k) for (k, v) in by_cert.iteritems()] - order.sort() - - for _, cert in order: - print "%s:" % (ALL_CERTS.Get(cert),) - apks = by_cert[cert] - apks.sort() - for _, apk in apks: - if apk.shared_uid: - print " %-*s %-*s [%s]" % (self.max_fn_len, apk.filename, - self.max_pkg_len, apk.package, - apk.shared_uid) - else: - print " %-*s %-*s" % (self.max_fn_len, apk.filename, - self.max_pkg_len, apk.package) - print - - def CompareWith(self, other): - """Look for instances where a given package that exists in both - self and other have different certs.""" - - all_apks = set(self.apks.keys()) - all_apks.update(other.apks.keys()) - - max_pkg_len = max(self.max_pkg_len, other.max_pkg_len) - - by_certpair = {} - - for i in all_apks: - if i in self.apks: - if i in other.apks: - # in both; should have same set of certs - if self.apks[i].certs != other.apks[i].certs: - by_certpair.setdefault((other.apks[i].certs, - self.apks[i].certs), []).append(i) - else: - print "%s [%s]: new APK (not in comparison target_files)" % ( - i, self.apks[i].filename) - else: - if i in other.apks: - print "%s [%s]: removed APK (only in comparison target_files)" % ( - i, other.apks[i].filename) - - if by_certpair: - AddProblem("some APKs changed certs") - Banner("APK signing differences") - for (old, new), packages in sorted(by_certpair.items()): - for i, o in enumerate(old): - if i == 0: - print "was", ALL_CERTS.Get(o) - else: - print " ", ALL_CERTS.Get(o) - for i, n in 
enumerate(new): - if i == 0: - print "now", ALL_CERTS.Get(n) - else: - print " ", ALL_CERTS.Get(n) - for i in sorted(packages): - old_fn = other.apks[i].filename - new_fn = self.apks[i].filename - if old_fn == new_fn: - print " %-*s [%s]" % (max_pkg_len, i, old_fn) - else: - print " %-*s [was: %s; now: %s]" % (max_pkg_len, i, - old_fn, new_fn) - print - - -def main(argv): - def option_handler(o, a): - if o in ("-c", "--compare_with"): - OPTIONS.compare_with = a - elif o in ("-l", "--local_cert_dirs"): - OPTIONS.local_cert_dirs = [i.strip() for i in a.split(",")] - elif o in ("-t", "--text"): - OPTIONS.text = True - else: - return False - return True - - args = common.ParseOptions(argv, __doc__, - extra_opts="c:l:t", - extra_long_opts=["compare_with=", - "local_cert_dirs="], - extra_option_handler=option_handler) - - if len(args) != 1: - common.Usage(__doc__) - sys.exit(1) - - ALL_CERTS.FindLocalCerts() - - Push("input target_files:") - try: - target_files = TargetFiles() - target_files.LoadZipFile(args[0]) - finally: - Pop() - - compare_files = None - if OPTIONS.compare_with: - Push("comparison target_files:") - try: - compare_files = TargetFiles() - compare_files.LoadZipFile(OPTIONS.compare_with) - finally: - Pop() - - if OPTIONS.text or not compare_files: - Banner("target files") - target_files.PrintCerts() - target_files.CheckSharedUids() - target_files.CheckExternalSignatures() - if compare_files: - if OPTIONS.text: - Banner("comparison files") - compare_files.PrintCerts() - target_files.CompareWith(compare_files) - - if PROBLEMS: - print "%d problem(s) found:\n" % (len(PROBLEMS),) - for p in PROBLEMS: - print p - return 1 - - return 0 - - -if __name__ == '__main__': - try: - r = main(sys.argv[1:]) - sys.exit(r) - except common.ExternalError as e: - print - print " ERROR: %s" % (e,) - print - sys.exit(1) diff --git a/tools/releasetools/check_target_files_signatures.tmp b/tools/releasetools/check_target_files_signatures.tmp new file mode 120000 index 
000000000..9f62aa323 --- /dev/null +++ b/tools/releasetools/check_target_files_signatures.tmp @@ -0,0 +1 @@ +check_target_files_signatures.py \ No newline at end of file diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch deleted file mode 120000 index 45cec0862..000000000 --- a/tools/releasetools/make_recovery_patch +++ /dev/null @@ -1 +0,0 @@ -make_recovery_patch.py \ No newline at end of file diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch new file mode 100755 index 000000000..08d145008 --- /dev/null +++ b/tools/releasetools/make_recovery_patch @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# +# Copyright (C) 2014 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." 
+ sys.exit(1) + +import os +import common + +OPTIONS = common.OPTIONS + +def main(argv): + # def option_handler(o, a): + # return False + + args = common.ParseOptions(argv, __doc__) + input_dir, output_dir = args + + OPTIONS.info_dict = common.LoadInfoDict(input_dir) + + recovery_img = common.GetBootableImage("recovery.img", "recovery.img", + input_dir, "RECOVERY") + boot_img = common.GetBootableImage("boot.img", "boot.img", + input_dir, "BOOT") + + if not recovery_img or not boot_img: + sys.exit(0) + + def output_sink(fn, data): + with open(os.path.join(output_dir, "SYSTEM", *fn.split("/")), "wb") as f: + f.write(data) + + common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py deleted file mode 100755 index 08d145008..000000000 --- a/tools/releasetools/make_recovery_patch.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2014 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." 
- sys.exit(1) - -import os -import common - -OPTIONS = common.OPTIONS - -def main(argv): - # def option_handler(o, a): - # return False - - args = common.ParseOptions(argv, __doc__) - input_dir, output_dir = args - - OPTIONS.info_dict = common.LoadInfoDict(input_dir) - - recovery_img = common.GetBootableImage("recovery.img", "recovery.img", - input_dir, "RECOVERY") - boot_img = common.GetBootableImage("boot.img", "boot.img", - input_dir, "BOOT") - - if not recovery_img or not boot_img: - sys.exit(0) - - def output_sink(fn, data): - with open(os.path.join(output_dir, "SYSTEM", *fn.split("/")), "wb") as f: - f.write(data) - - common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/tools/releasetools/make_recovery_patch.tmp b/tools/releasetools/make_recovery_patch.tmp new file mode 120000 index 000000000..45cec0862 --- /dev/null +++ b/tools/releasetools/make_recovery_patch.tmp @@ -0,0 +1 @@ +make_recovery_patch.py \ No newline at end of file diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks deleted file mode 120000 index b5ec59a25..000000000 --- a/tools/releasetools/sign_target_files_apks +++ /dev/null @@ -1 +0,0 @@ -sign_target_files_apks.py \ No newline at end of file diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks new file mode 100755 index 000000000..60d62c212 --- /dev/null +++ b/tools/releasetools/sign_target_files_apks @@ -0,0 +1,512 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Signs all the APK files in a target-files zipfile, producing a new +target-files zip. + +Usage: sign_target_files_apks [flags] input_target_files output_target_files + + -e (--extra_apks) + Add extra APK name/key pairs as though they appeared in + apkcerts.txt (so mappings specified by -k and -d are applied). + Keys specified in -e override any value for that app contained + in the apkcerts.txt file. Option may be repeated to give + multiple extra packages. + + -k (--key_mapping) + Add a mapping from the key name as specified in apkcerts.txt (the + src_key) to the real key you wish to sign the package with + (dest_key). Option may be repeated to give multiple key + mappings. + + -d (--default_key_mappings) + Set up the following key mappings: + + $devkey/devkey ==> $dir/releasekey + $devkey/testkey ==> $dir/releasekey + $devkey/media ==> $dir/media + $devkey/shared ==> $dir/shared + $devkey/platform ==> $dir/platform + + where $devkey is the directory part of the value of + default_system_dev_certificate from the input target-files's + META/misc_info.txt. (Defaulting to "build/target/product/security" + if the value is not present in misc_info. + + -d and -k options are added to the set of mappings in the order + in which they appear on the command line. + + -o (--replace_ota_keys) + Replace the certificate (public key) used by OTA package + verification with the one specified in the input target_files + zip (in the META/otakeys.txt file). Key remapping (-k and -d) + is performed on this key. + + -t (--tag_changes) <+tag>,<-tag>,... 
+ Comma-separated list of changes to make to the set of tags (in + the last component of the build fingerprint). Prefix each with + '+' or '-' to indicate whether that tag should be added or + removed. Changes are processed in the order they appear. + Default value is "-test-keys,-dev-keys,+release-keys". + +""" + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." + sys.exit(1) + +import base64 +import cStringIO +import copy +import errno +import os +import re +import shutil +import subprocess +import tempfile +import zipfile + +import add_img_to_target_files +import common + +OPTIONS = common.OPTIONS + +OPTIONS.extra_apks = {} +OPTIONS.key_map = {} +OPTIONS.replace_ota_keys = False +OPTIONS.replace_verity_public_key = False +OPTIONS.replace_verity_private_key = False +OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys") + +def GetApkCerts(tf_zip): + certmap = common.ReadApkCerts(tf_zip) + + # apply the key remapping to the contents of the file + for apk, cert in certmap.iteritems(): + certmap[apk] = OPTIONS.key_map.get(cert, cert) + + # apply all the -e options, overriding anything in the file + for apk, cert in OPTIONS.extra_apks.iteritems(): + if not cert: + cert = "PRESIGNED" + certmap[apk] = OPTIONS.key_map.get(cert, cert) + + return certmap + + +def CheckAllApksSigned(input_tf_zip, apk_key_map): + """Check that all the APKs we want to sign have keys specified, and + error out if they don't.""" + unknown_apks = [] + for info in input_tf_zip.infolist(): + if info.filename.endswith(".apk"): + name = os.path.basename(info.filename) + if name not in apk_key_map: + unknown_apks.append(name) + if unknown_apks: + print "ERROR: no key specified for:\n\n ", + print "\n ".join(unknown_apks) + print "\nUse '-e =' to specify a key (which may be an" + print "empty string to not sign this apk)." 
+ sys.exit(1) + + +def SignApk(data, keyname, pw): + unsigned = tempfile.NamedTemporaryFile() + unsigned.write(data) + unsigned.flush() + + signed = tempfile.NamedTemporaryFile() + + common.SignFile(unsigned.name, signed.name, keyname, pw, align=4) + + data = signed.read() + unsigned.close() + signed.close() + + return data + + +def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info, + apk_key_map, key_passwords): + + maxsize = max([len(os.path.basename(i.filename)) + for i in input_tf_zip.infolist() + if i.filename.endswith('.apk')]) + rebuild_recovery = False + + tmpdir = tempfile.mkdtemp() + def write_to_temp(fn, attr, data): + fn = os.path.join(tmpdir, fn) + if fn.endswith("/"): + fn = os.path.join(tmpdir, fn) + os.mkdir(fn) + else: + d = os.path.dirname(fn) + if d and not os.path.exists(d): + os.makedirs(d) + + if attr >> 16 == 0xa1ff: + os.symlink(data, fn) + else: + with open(fn, "wb") as f: + f.write(data) + + for info in input_tf_zip.infolist(): + if info.filename.startswith("IMAGES/"): + continue + + data = input_tf_zip.read(info.filename) + out_info = copy.copy(info) + + if (info.filename == "META/misc_info.txt" and + OPTIONS.replace_verity_private_key): + ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info, + OPTIONS.replace_verity_private_key[1]) + elif (info.filename == "BOOT/RAMDISK/verity_key" and + OPTIONS.replace_verity_public_key): + new_data = ReplaceVerityPublicKey(output_tf_zip, + OPTIONS.replace_verity_public_key[1]) + write_to_temp(info.filename, info.external_attr, new_data) + elif (info.filename.startswith("BOOT/") or + info.filename.startswith("RECOVERY/") or + info.filename.startswith("META/") or + info.filename == "SYSTEM/etc/recovery-resource.dat"): + write_to_temp(info.filename, info.external_attr, data) + + if info.filename.endswith(".apk"): + name = os.path.basename(info.filename) + key = apk_key_map[name] + if key not in common.SPECIAL_CERT_STRINGS: + print " signing: %-*s (%s)" % (maxsize, name, key) + 
signed_data = SignApk(data, key, key_passwords[key]) + common.ZipWriteStr(output_tf_zip, out_info, signed_data) + else: + # an APK we're not supposed to sign. + print "NOT signing: %s" % (name,) + common.ZipWriteStr(output_tf_zip, out_info, data) + elif info.filename in ("SYSTEM/build.prop", + "VENDOR/build.prop", + "BOOT/RAMDISK/default.prop", + "RECOVERY/RAMDISK/default.prop"): + print "rewriting %s:" % (info.filename,) + new_data = RewriteProps(data, misc_info) + common.ZipWriteStr(output_tf_zip, out_info, new_data) + if info.filename in ("BOOT/RAMDISK/default.prop", + "RECOVERY/RAMDISK/default.prop"): + write_to_temp(info.filename, info.external_attr, new_data) + elif info.filename.endswith("mac_permissions.xml"): + print "rewriting %s with new keys." % (info.filename,) + new_data = ReplaceCerts(data) + common.ZipWriteStr(output_tf_zip, out_info, new_data) + elif info.filename in ("SYSTEM/recovery-from-boot.p", + "SYSTEM/bin/install-recovery.sh"): + rebuild_recovery = True + elif (OPTIONS.replace_ota_keys and + info.filename in ("RECOVERY/RAMDISK/res/keys", + "SYSTEM/etc/security/otacerts.zip")): + # don't copy these files if we're regenerating them below + pass + elif (OPTIONS.replace_verity_private_key and + info.filename == "META/misc_info.txt"): + pass + elif (OPTIONS.replace_verity_public_key and + info.filename == "BOOT/RAMDISK/verity_key"): + pass + else: + # a non-APK file; copy it verbatim + common.ZipWriteStr(output_tf_zip, out_info, data) + + if OPTIONS.replace_ota_keys: + new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info) + if new_recovery_keys: + write_to_temp("RECOVERY/RAMDISK/res/keys", 0o755 << 16, new_recovery_keys) + + if rebuild_recovery: + recovery_img = common.GetBootableImage( + "recovery.img", "recovery.img", tmpdir, "RECOVERY", info_dict=misc_info) + boot_img = common.GetBootableImage( + "boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info) + + def output_sink(fn, data): + common.ZipWriteStr(output_tf_zip, 
"SYSTEM/" + fn, data) + + common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img, + info_dict=misc_info) + + shutil.rmtree(tmpdir) + + +def ReplaceCerts(data): + """Given a string of data, replace all occurences of a set + of X509 certs with a newer set of X509 certs and return + the updated data string.""" + for old, new in OPTIONS.key_map.iteritems(): + try: + if OPTIONS.verbose: + print " Replacing %s.x509.pem with %s.x509.pem" % (old, new) + f = open(old + ".x509.pem") + old_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() + f.close() + f = open(new + ".x509.pem") + new_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() + f.close() + # Only match entire certs. + pattern = "\\b"+old_cert16+"\\b" + (data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE) + if OPTIONS.verbose: + print " Replaced %d occurence(s) of %s.x509.pem with " \ + "%s.x509.pem" % (num, old, new) + except IOError as e: + if e.errno == errno.ENOENT and not OPTIONS.verbose: + continue + + print " Error accessing %s. %s. Skip replacing %s.x509.pem " \ + "with %s.x509.pem." 
% (e.filename, e.strerror, old, new) + + return data + + +def EditTags(tags): + """Given a string containing comma-separated tags, apply the edits + specified in OPTIONS.tag_changes and return the updated string.""" + tags = set(tags.split(",")) + for ch in OPTIONS.tag_changes: + if ch[0] == "-": + tags.discard(ch[1:]) + elif ch[0] == "+": + tags.add(ch[1:]) + return ",".join(sorted(tags)) + + +def RewriteProps(data, misc_info): + output = [] + for line in data.split("\n"): + line = line.strip() + original_line = line + if line and line[0] != '#' and "=" in line: + key, value = line.split("=", 1) + if (key in ("ro.build.fingerprint", "ro.vendor.build.fingerprint") + and misc_info.get("oem_fingerprint_properties") is None): + pieces = value.split("/") + pieces[-1] = EditTags(pieces[-1]) + value = "/".join(pieces) + elif (key in ("ro.build.thumbprint", "ro.vendor.build.thumbprint") + and misc_info.get("oem_fingerprint_properties") is not None): + pieces = value.split("/") + pieces[-1] = EditTags(pieces[-1]) + value = "/".join(pieces) + elif key == "ro.bootimage.build.fingerprint": + pieces = value.split("/") + pieces[-1] = EditTags(pieces[-1]) + value = "/".join(pieces) + elif key == "ro.build.description": + pieces = value.split(" ") + assert len(pieces) == 5 + pieces[-1] = EditTags(pieces[-1]) + value = " ".join(pieces) + elif key == "ro.build.tags": + value = EditTags(value) + elif key == "ro.build.display.id": + # change, eg, "JWR66N dev-keys" to "JWR66N" + value = value.split() + if len(value) > 1 and value[-1].endswith("-keys"): + value.pop() + value = " ".join(value) + line = key + "=" + value + if line != original_line: + print " replace: ", original_line + print " with: ", line + output.append(line) + return "\n".join(output) + "\n" + + +def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info): + try: + keylist = input_tf_zip.read("META/otakeys.txt").split() + except KeyError: + raise common.ExternalError("can't read META/otakeys.txt from input") + + 
extra_recovery_keys = misc_info.get("extra_recovery_keys", None) + if extra_recovery_keys: + extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem" + for k in extra_recovery_keys.split()] + if extra_recovery_keys: + print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys) + else: + extra_recovery_keys = [] + + mapped_keys = [] + for k in keylist: + m = re.match(r"^(.*)\.x509\.pem$", k) + if not m: + raise common.ExternalError( + "can't parse \"%s\" from META/otakeys.txt" % (k,)) + k = m.group(1) + mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem") + + if mapped_keys: + print "using:\n ", "\n ".join(mapped_keys) + print "for OTA package verification" + else: + devkey = misc_info.get("default_system_dev_certificate", + "build/target/product/security/testkey") + mapped_keys.append( + OPTIONS.key_map.get(devkey, devkey) + ".x509.pem") + print "META/otakeys.txt has no keys; using", mapped_keys[0] + + # recovery uses a version of the key that has been slightly + # predigested (by DumpPublicKey.java) and put in res/keys. + # extra_recovery_keys are used only in recovery. + + p = common.Run(["java", "-jar", + os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")] + + mapped_keys + extra_recovery_keys, + stdout=subprocess.PIPE) + new_recovery_keys, _ = p.communicate() + if p.returncode != 0: + raise common.ExternalError("failed to run dumpkeys") + common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", + new_recovery_keys) + + # SystemUpdateActivity uses the x509.pem version of the keys, but + # put into a zipfile system/etc/security/otacerts.zip. + # We DO NOT include the extra_recovery_keys (if any) here. 
+ + temp_file = cStringIO.StringIO() + certs_zip = zipfile.ZipFile(temp_file, "w") + for k in mapped_keys: + certs_zip.write(k) + certs_zip.close() + common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip", + temp_file.getvalue()) + + return new_recovery_keys + +def ReplaceVerityPublicKey(targetfile_zip, key_path): + print "Replacing verity public key with %s" % key_path + with open(key_path) as f: + data = f.read() + common.ZipWriteStr(targetfile_zip, "BOOT/RAMDISK/verity_key", data) + return data + +def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip, + misc_info, key_path): + print "Replacing verity private key with %s" % key_path + current_key = misc_info["verity_key"] + original_misc_info = targetfile_input_zip.read("META/misc_info.txt") + new_misc_info = original_misc_info.replace(current_key, key_path) + common.ZipWriteStr(targetfile_output_zip, "META/misc_info.txt", new_misc_info) + misc_info["verity_key"] = key_path + +def BuildKeyMap(misc_info, key_mapping_options): + for s, d in key_mapping_options: + if s is None: # -d option + devkey = misc_info.get("default_system_dev_certificate", + "build/target/product/security/testkey") + devkeydir = os.path.dirname(devkey) + + OPTIONS.key_map.update({ + devkeydir + "/testkey": d + "/releasekey", + devkeydir + "/devkey": d + "/releasekey", + devkeydir + "/media": d + "/media", + devkeydir + "/shared": d + "/shared", + devkeydir + "/platform": d + "/platform", + }) + else: + OPTIONS.key_map[s] = d + + +def main(argv): + + key_mapping_options = [] + + def option_handler(o, a): + if o in ("-e", "--extra_apks"): + names, key = a.split("=") + names = names.split(",") + for n in names: + OPTIONS.extra_apks[n] = key + elif o in ("-d", "--default_key_mappings"): + key_mapping_options.append((None, a)) + elif o in ("-k", "--key_mapping"): + key_mapping_options.append(a.split("=", 1)) + elif o in ("-o", "--replace_ota_keys"): + OPTIONS.replace_ota_keys = True + elif o in ("-t", 
"--tag_changes"): + new = [] + for i in a.split(","): + i = i.strip() + if not i or i[0] not in "-+": + raise ValueError("Bad tag change '%s'" % (i,)) + new.append(i[0] + i[1:].strip()) + OPTIONS.tag_changes = tuple(new) + elif o == "--replace_verity_public_key": + OPTIONS.replace_verity_public_key = (True, a) + elif o == "--replace_verity_private_key": + OPTIONS.replace_verity_private_key = (True, a) + else: + return False + return True + + args = common.ParseOptions(argv, __doc__, + extra_opts="e:d:k:ot:", + extra_long_opts=["extra_apks=", + "default_key_mappings=", + "key_mapping=", + "replace_ota_keys", + "tag_changes=", + "replace_verity_public_key=", + "replace_verity_private_key="], + extra_option_handler=option_handler) + + if len(args) != 2: + common.Usage(__doc__) + sys.exit(1) + + input_zip = zipfile.ZipFile(args[0], "r") + output_zip = zipfile.ZipFile(args[1], "w") + + misc_info = common.LoadInfoDict(input_zip) + + BuildKeyMap(misc_info, key_mapping_options) + + apk_key_map = GetApkCerts(input_zip) + CheckAllApksSigned(input_zip, apk_key_map) + + key_passwords = common.GetKeyPasswords(set(apk_key_map.values())) + ProcessTargetFiles(input_zip, output_zip, misc_info, + apk_key_map, key_passwords) + + common.ZipClose(input_zip) + common.ZipClose(output_zip) + + add_img_to_target_files.AddImagesToTargetFiles(args[1]) + + print "done." + + +if __name__ == '__main__': + try: + main(sys.argv[1:]) + except common.ExternalError, e: + print + print " ERROR: %s" % (e,) + print + sys.exit(1) diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py deleted file mode 100755 index 60d62c212..000000000 --- a/tools/releasetools/sign_target_files_apks.py +++ /dev/null @@ -1,512 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2008 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Signs all the APK files in a target-files zipfile, producing a new -target-files zip. - -Usage: sign_target_files_apks [flags] input_target_files output_target_files - - -e (--extra_apks) - Add extra APK name/key pairs as though they appeared in - apkcerts.txt (so mappings specified by -k and -d are applied). - Keys specified in -e override any value for that app contained - in the apkcerts.txt file. Option may be repeated to give - multiple extra packages. - - -k (--key_mapping) - Add a mapping from the key name as specified in apkcerts.txt (the - src_key) to the real key you wish to sign the package with - (dest_key). Option may be repeated to give multiple key - mappings. - - -d (--default_key_mappings) - Set up the following key mappings: - - $devkey/devkey ==> $dir/releasekey - $devkey/testkey ==> $dir/releasekey - $devkey/media ==> $dir/media - $devkey/shared ==> $dir/shared - $devkey/platform ==> $dir/platform - - where $devkey is the directory part of the value of - default_system_dev_certificate from the input target-files's - META/misc_info.txt. (Defaulting to "build/target/product/security" - if the value is not present in misc_info. - - -d and -k options are added to the set of mappings in the order - in which they appear on the command line. - - -o (--replace_ota_keys) - Replace the certificate (public key) used by OTA package - verification with the one specified in the input target_files - zip (in the META/otakeys.txt file). Key remapping (-k and -d) - is performed on this key. - - -t (--tag_changes) <+tag>,<-tag>,... 
- Comma-separated list of changes to make to the set of tags (in - the last component of the build fingerprint). Prefix each with - '+' or '-' to indicate whether that tag should be added or - removed. Changes are processed in the order they appear. - Default value is "-test-keys,-dev-keys,+release-keys". - -""" - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." - sys.exit(1) - -import base64 -import cStringIO -import copy -import errno -import os -import re -import shutil -import subprocess -import tempfile -import zipfile - -import add_img_to_target_files -import common - -OPTIONS = common.OPTIONS - -OPTIONS.extra_apks = {} -OPTIONS.key_map = {} -OPTIONS.replace_ota_keys = False -OPTIONS.replace_verity_public_key = False -OPTIONS.replace_verity_private_key = False -OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys") - -def GetApkCerts(tf_zip): - certmap = common.ReadApkCerts(tf_zip) - - # apply the key remapping to the contents of the file - for apk, cert in certmap.iteritems(): - certmap[apk] = OPTIONS.key_map.get(cert, cert) - - # apply all the -e options, overriding anything in the file - for apk, cert in OPTIONS.extra_apks.iteritems(): - if not cert: - cert = "PRESIGNED" - certmap[apk] = OPTIONS.key_map.get(cert, cert) - - return certmap - - -def CheckAllApksSigned(input_tf_zip, apk_key_map): - """Check that all the APKs we want to sign have keys specified, and - error out if they don't.""" - unknown_apks = [] - for info in input_tf_zip.infolist(): - if info.filename.endswith(".apk"): - name = os.path.basename(info.filename) - if name not in apk_key_map: - unknown_apks.append(name) - if unknown_apks: - print "ERROR: no key specified for:\n\n ", - print "\n ".join(unknown_apks) - print "\nUse '-e =' to specify a key (which may be an" - print "empty string to not sign this apk)." 
- sys.exit(1) - - -def SignApk(data, keyname, pw): - unsigned = tempfile.NamedTemporaryFile() - unsigned.write(data) - unsigned.flush() - - signed = tempfile.NamedTemporaryFile() - - common.SignFile(unsigned.name, signed.name, keyname, pw, align=4) - - data = signed.read() - unsigned.close() - signed.close() - - return data - - -def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info, - apk_key_map, key_passwords): - - maxsize = max([len(os.path.basename(i.filename)) - for i in input_tf_zip.infolist() - if i.filename.endswith('.apk')]) - rebuild_recovery = False - - tmpdir = tempfile.mkdtemp() - def write_to_temp(fn, attr, data): - fn = os.path.join(tmpdir, fn) - if fn.endswith("/"): - fn = os.path.join(tmpdir, fn) - os.mkdir(fn) - else: - d = os.path.dirname(fn) - if d and not os.path.exists(d): - os.makedirs(d) - - if attr >> 16 == 0xa1ff: - os.symlink(data, fn) - else: - with open(fn, "wb") as f: - f.write(data) - - for info in input_tf_zip.infolist(): - if info.filename.startswith("IMAGES/"): - continue - - data = input_tf_zip.read(info.filename) - out_info = copy.copy(info) - - if (info.filename == "META/misc_info.txt" and - OPTIONS.replace_verity_private_key): - ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info, - OPTIONS.replace_verity_private_key[1]) - elif (info.filename == "BOOT/RAMDISK/verity_key" and - OPTIONS.replace_verity_public_key): - new_data = ReplaceVerityPublicKey(output_tf_zip, - OPTIONS.replace_verity_public_key[1]) - write_to_temp(info.filename, info.external_attr, new_data) - elif (info.filename.startswith("BOOT/") or - info.filename.startswith("RECOVERY/") or - info.filename.startswith("META/") or - info.filename == "SYSTEM/etc/recovery-resource.dat"): - write_to_temp(info.filename, info.external_attr, data) - - if info.filename.endswith(".apk"): - name = os.path.basename(info.filename) - key = apk_key_map[name] - if key not in common.SPECIAL_CERT_STRINGS: - print " signing: %-*s (%s)" % (maxsize, name, key) - 
signed_data = SignApk(data, key, key_passwords[key]) - common.ZipWriteStr(output_tf_zip, out_info, signed_data) - else: - # an APK we're not supposed to sign. - print "NOT signing: %s" % (name,) - common.ZipWriteStr(output_tf_zip, out_info, data) - elif info.filename in ("SYSTEM/build.prop", - "VENDOR/build.prop", - "BOOT/RAMDISK/default.prop", - "RECOVERY/RAMDISK/default.prop"): - print "rewriting %s:" % (info.filename,) - new_data = RewriteProps(data, misc_info) - common.ZipWriteStr(output_tf_zip, out_info, new_data) - if info.filename in ("BOOT/RAMDISK/default.prop", - "RECOVERY/RAMDISK/default.prop"): - write_to_temp(info.filename, info.external_attr, new_data) - elif info.filename.endswith("mac_permissions.xml"): - print "rewriting %s with new keys." % (info.filename,) - new_data = ReplaceCerts(data) - common.ZipWriteStr(output_tf_zip, out_info, new_data) - elif info.filename in ("SYSTEM/recovery-from-boot.p", - "SYSTEM/bin/install-recovery.sh"): - rebuild_recovery = True - elif (OPTIONS.replace_ota_keys and - info.filename in ("RECOVERY/RAMDISK/res/keys", - "SYSTEM/etc/security/otacerts.zip")): - # don't copy these files if we're regenerating them below - pass - elif (OPTIONS.replace_verity_private_key and - info.filename == "META/misc_info.txt"): - pass - elif (OPTIONS.replace_verity_public_key and - info.filename == "BOOT/RAMDISK/verity_key"): - pass - else: - # a non-APK file; copy it verbatim - common.ZipWriteStr(output_tf_zip, out_info, data) - - if OPTIONS.replace_ota_keys: - new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info) - if new_recovery_keys: - write_to_temp("RECOVERY/RAMDISK/res/keys", 0o755 << 16, new_recovery_keys) - - if rebuild_recovery: - recovery_img = common.GetBootableImage( - "recovery.img", "recovery.img", tmpdir, "RECOVERY", info_dict=misc_info) - boot_img = common.GetBootableImage( - "boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info) - - def output_sink(fn, data): - common.ZipWriteStr(output_tf_zip, 
"SYSTEM/" + fn, data) - - common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img, - info_dict=misc_info) - - shutil.rmtree(tmpdir) - - -def ReplaceCerts(data): - """Given a string of data, replace all occurences of a set - of X509 certs with a newer set of X509 certs and return - the updated data string.""" - for old, new in OPTIONS.key_map.iteritems(): - try: - if OPTIONS.verbose: - print " Replacing %s.x509.pem with %s.x509.pem" % (old, new) - f = open(old + ".x509.pem") - old_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() - f.close() - f = open(new + ".x509.pem") - new_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() - f.close() - # Only match entire certs. - pattern = "\\b"+old_cert16+"\\b" - (data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE) - if OPTIONS.verbose: - print " Replaced %d occurence(s) of %s.x509.pem with " \ - "%s.x509.pem" % (num, old, new) - except IOError as e: - if e.errno == errno.ENOENT and not OPTIONS.verbose: - continue - - print " Error accessing %s. %s. Skip replacing %s.x509.pem " \ - "with %s.x509.pem." 
% (e.filename, e.strerror, old, new) - - return data - - -def EditTags(tags): - """Given a string containing comma-separated tags, apply the edits - specified in OPTIONS.tag_changes and return the updated string.""" - tags = set(tags.split(",")) - for ch in OPTIONS.tag_changes: - if ch[0] == "-": - tags.discard(ch[1:]) - elif ch[0] == "+": - tags.add(ch[1:]) - return ",".join(sorted(tags)) - - -def RewriteProps(data, misc_info): - output = [] - for line in data.split("\n"): - line = line.strip() - original_line = line - if line and line[0] != '#' and "=" in line: - key, value = line.split("=", 1) - if (key in ("ro.build.fingerprint", "ro.vendor.build.fingerprint") - and misc_info.get("oem_fingerprint_properties") is None): - pieces = value.split("/") - pieces[-1] = EditTags(pieces[-1]) - value = "/".join(pieces) - elif (key in ("ro.build.thumbprint", "ro.vendor.build.thumbprint") - and misc_info.get("oem_fingerprint_properties") is not None): - pieces = value.split("/") - pieces[-1] = EditTags(pieces[-1]) - value = "/".join(pieces) - elif key == "ro.bootimage.build.fingerprint": - pieces = value.split("/") - pieces[-1] = EditTags(pieces[-1]) - value = "/".join(pieces) - elif key == "ro.build.description": - pieces = value.split(" ") - assert len(pieces) == 5 - pieces[-1] = EditTags(pieces[-1]) - value = " ".join(pieces) - elif key == "ro.build.tags": - value = EditTags(value) - elif key == "ro.build.display.id": - # change, eg, "JWR66N dev-keys" to "JWR66N" - value = value.split() - if len(value) > 1 and value[-1].endswith("-keys"): - value.pop() - value = " ".join(value) - line = key + "=" + value - if line != original_line: - print " replace: ", original_line - print " with: ", line - output.append(line) - return "\n".join(output) + "\n" - - -def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info): - try: - keylist = input_tf_zip.read("META/otakeys.txt").split() - except KeyError: - raise common.ExternalError("can't read META/otakeys.txt from input") - - 
extra_recovery_keys = misc_info.get("extra_recovery_keys", None) - if extra_recovery_keys: - extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem" - for k in extra_recovery_keys.split()] - if extra_recovery_keys: - print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys) - else: - extra_recovery_keys = [] - - mapped_keys = [] - for k in keylist: - m = re.match(r"^(.*)\.x509\.pem$", k) - if not m: - raise common.ExternalError( - "can't parse \"%s\" from META/otakeys.txt" % (k,)) - k = m.group(1) - mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem") - - if mapped_keys: - print "using:\n ", "\n ".join(mapped_keys) - print "for OTA package verification" - else: - devkey = misc_info.get("default_system_dev_certificate", - "build/target/product/security/testkey") - mapped_keys.append( - OPTIONS.key_map.get(devkey, devkey) + ".x509.pem") - print "META/otakeys.txt has no keys; using", mapped_keys[0] - - # recovery uses a version of the key that has been slightly - # predigested (by DumpPublicKey.java) and put in res/keys. - # extra_recovery_keys are used only in recovery. - - p = common.Run(["java", "-jar", - os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")] - + mapped_keys + extra_recovery_keys, - stdout=subprocess.PIPE) - new_recovery_keys, _ = p.communicate() - if p.returncode != 0: - raise common.ExternalError("failed to run dumpkeys") - common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", - new_recovery_keys) - - # SystemUpdateActivity uses the x509.pem version of the keys, but - # put into a zipfile system/etc/security/otacerts.zip. - # We DO NOT include the extra_recovery_keys (if any) here. 
- - temp_file = cStringIO.StringIO() - certs_zip = zipfile.ZipFile(temp_file, "w") - for k in mapped_keys: - certs_zip.write(k) - certs_zip.close() - common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip", - temp_file.getvalue()) - - return new_recovery_keys - -def ReplaceVerityPublicKey(targetfile_zip, key_path): - print "Replacing verity public key with %s" % key_path - with open(key_path) as f: - data = f.read() - common.ZipWriteStr(targetfile_zip, "BOOT/RAMDISK/verity_key", data) - return data - -def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip, - misc_info, key_path): - print "Replacing verity private key with %s" % key_path - current_key = misc_info["verity_key"] - original_misc_info = targetfile_input_zip.read("META/misc_info.txt") - new_misc_info = original_misc_info.replace(current_key, key_path) - common.ZipWriteStr(targetfile_output_zip, "META/misc_info.txt", new_misc_info) - misc_info["verity_key"] = key_path - -def BuildKeyMap(misc_info, key_mapping_options): - for s, d in key_mapping_options: - if s is None: # -d option - devkey = misc_info.get("default_system_dev_certificate", - "build/target/product/security/testkey") - devkeydir = os.path.dirname(devkey) - - OPTIONS.key_map.update({ - devkeydir + "/testkey": d + "/releasekey", - devkeydir + "/devkey": d + "/releasekey", - devkeydir + "/media": d + "/media", - devkeydir + "/shared": d + "/shared", - devkeydir + "/platform": d + "/platform", - }) - else: - OPTIONS.key_map[s] = d - - -def main(argv): - - key_mapping_options = [] - - def option_handler(o, a): - if o in ("-e", "--extra_apks"): - names, key = a.split("=") - names = names.split(",") - for n in names: - OPTIONS.extra_apks[n] = key - elif o in ("-d", "--default_key_mappings"): - key_mapping_options.append((None, a)) - elif o in ("-k", "--key_mapping"): - key_mapping_options.append(a.split("=", 1)) - elif o in ("-o", "--replace_ota_keys"): - OPTIONS.replace_ota_keys = True - elif o in ("-t", 
"--tag_changes"): - new = [] - for i in a.split(","): - i = i.strip() - if not i or i[0] not in "-+": - raise ValueError("Bad tag change '%s'" % (i,)) - new.append(i[0] + i[1:].strip()) - OPTIONS.tag_changes = tuple(new) - elif o == "--replace_verity_public_key": - OPTIONS.replace_verity_public_key = (True, a) - elif o == "--replace_verity_private_key": - OPTIONS.replace_verity_private_key = (True, a) - else: - return False - return True - - args = common.ParseOptions(argv, __doc__, - extra_opts="e:d:k:ot:", - extra_long_opts=["extra_apks=", - "default_key_mappings=", - "key_mapping=", - "replace_ota_keys", - "tag_changes=", - "replace_verity_public_key=", - "replace_verity_private_key="], - extra_option_handler=option_handler) - - if len(args) != 2: - common.Usage(__doc__) - sys.exit(1) - - input_zip = zipfile.ZipFile(args[0], "r") - output_zip = zipfile.ZipFile(args[1], "w") - - misc_info = common.LoadInfoDict(input_zip) - - BuildKeyMap(misc_info, key_mapping_options) - - apk_key_map = GetApkCerts(input_zip) - CheckAllApksSigned(input_zip, apk_key_map) - - key_passwords = common.GetKeyPasswords(set(apk_key_map.values())) - ProcessTargetFiles(input_zip, output_zip, misc_info, - apk_key_map, key_passwords) - - common.ZipClose(input_zip) - common.ZipClose(output_zip) - - add_img_to_target_files.AddImagesToTargetFiles(args[1]) - - print "done." 
- - -if __name__ == '__main__': - try: - main(sys.argv[1:]) - except common.ExternalError, e: - print - print " ERROR: %s" % (e,) - print - sys.exit(1) diff --git a/tools/releasetools/sign_target_files_apks.tmp b/tools/releasetools/sign_target_files_apks.tmp new file mode 120000 index 000000000..b5ec59a25 --- /dev/null +++ b/tools/releasetools/sign_target_files_apks.tmp @@ -0,0 +1 @@ +sign_target_files_apks.py \ No newline at end of file From 6da0bfca2e0668420d7d8ddc6733829537b31e62 Mon Sep 17 00:00:00 2001 From: William Roberts Date: Tue, 20 Aug 2013 16:02:53 -0700 Subject: [PATCH 037/309] Add PRODUCT_BOOTANIMATION Just add PRODUCT_BOOTANIMATION in your product makefiles and point it to the zip file you would like to have used as the boot animation. The coresponsing build system handles picking the last one, which is the last product to set this. Change-Id: I8c95a515a8fbb92d363141eb79e254712dccc162 Signed-off-by: William Roberts Fix for build break when PRODUCT_BOOTANIMATION is unset Change-Id: I236c2dd35ba0e632ed327ed6dc36324c9e59587a Restore original behavior when PRODUCT_BOOTANIMATION is undefined Change-Id: I3ee7141f7c26cee033b8a30824caf38fcacec5a8 --- core/product.mk | 1 + core/product_config.mk | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/core/product.mk b/core/product.mk index 258a39989..79ffec407 100644 --- a/core/product.mk +++ b/core/product.mk @@ -65,6 +65,7 @@ endef # _product_var_list := \ + PRODUCT_BOOTANIMATION \ PRODUCT_BUILD_PROP_OVERRIDES \ PRODUCT_NAME \ PRODUCT_MODEL \ diff --git a/core/product_config.mk b/core/product_config.mk index 41f9dbf56..020143128 100644 --- a/core/product_config.mk +++ b/core/product_config.mk @@ -342,6 +342,12 @@ endif # The optional : is used to indicate the owner of a vendor file. 
PRODUCT_COPY_FILES := \ $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_COPY_FILES)) +_boot_animation := $(strip $(lastword $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_BOOTANIMATION))) +ifneq ($(_boot_animation),) +PRODUCT_COPY_FILES += \ + $(_boot_animation):system/media/bootanimation.zip +endif +_boot_animation := # A list of property assignments, like "key = value", with zero or more # whitespace characters on either side of the '='. From 397d815cf7063d2e5399a67ffbaea98ca80a872d Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Tue, 22 Oct 2013 22:23:27 -0700 Subject: [PATCH 038/309] build: Add script to generate extra images Change-Id: I53081c7a2b4f18bc8c8a27b04d311a6b078b2022 Use CM standard qcom naming Change-Id: I360b3dcf1fe82140a7993189e881f9d77b571bef --- core/generate_extra_images.mk | 318 ++++++++++++++++++++++++++++++++++ 1 file changed, 318 insertions(+) create mode 100644 core/generate_extra_images.mk diff --git a/core/generate_extra_images.mk b/core/generate_extra_images.mk new file mode 100644 index 000000000..a0aa542e4 --- /dev/null +++ b/core/generate_extra_images.mk @@ -0,0 +1,318 @@ +# This makefile is used to generate extra images for QCOM targets +# persist, device tree & NAND images required for different QCOM targets. + +# These variables are required to make sure that the required +# files/targets are available before generating NAND images. +# This file is included from device/qcom//AndroidBoard.mk +# and gets parsed before build/core/Makefile, which has these +# variables defined. build/core/Makefile will overwrite these +# variables again. 
+INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img +INSTALLED_RAMDISK_TARGET := $(PRODUCT_OUT)/ramdisk.img +INSTALLED_SYSTEMIMAGE := $(PRODUCT_OUT)/system.img +INSTALLED_USERDATAIMAGE_TARGET := $(PRODUCT_OUT)/userdata.img +INSTALLED_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img +recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img + +#---------------------------------------------------------------------- +# Generate secure boot & recovery image +#---------------------------------------------------------------------- +ifeq ($(TARGET_BOOTIMG_SIGNED),true) +INSTALLED_SEC_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img.secure +INSTALLED_SEC_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img.secure + +ifneq ($(BUILD_TINY_ANDROID),true) +intermediates := $(call intermediates-dir-for,PACKAGING,recovery_patch) +RECOVERY_FROM_BOOT_PATCH := $(intermediates)/recovery_from_boot.p +endif + +ifndef TARGET_SHA_TYPE + TARGET_SHA_TYPE := sha256 +endif + +define build-sec-image + $(hide) mv -f $(1) $(1).nonsecure + $(hide) openssl dgst -$(TARGET_SHA_TYPE) -binary $(1).nonsecure > $(1).$(TARGET_SHA_TYPE) + $(hide) openssl rsautl -sign -in $(1).$(TARGET_SHA_TYPE) -inkey $(PRODUCT_PRIVATE_KEY) -out $(1).sig + $(hide) dd if=/dev/zero of=$(1).sig.padded bs=$(BOARD_KERNEL_PAGESIZE) count=1 + $(hide) dd if=$(1).sig of=$(1).sig.padded conv=notrunc + $(hide) cat $(1).nonsecure $(1).sig.padded > $(1).secure + $(hide) rm -rf $(1).$(TARGET_SHA_TYPE) $(1).sig $(1).sig.padded + $(hide) mv -f $(1).secure $(1) +endef + +$(INSTALLED_SEC_BOOTIMAGE_TARGET): $(INSTALLED_BOOTIMAGE_TARGET) $(RECOVERY_FROM_BOOT_PATCH) + $(hide) $(call build-sec-image,$(INSTALLED_BOOTIMAGE_TARGET)) + +ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_SEC_BOOTIMAGE_TARGET) +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_SEC_BOOTIMAGE_TARGET) + +$(INSTALLED_SEC_RECOVERYIMAGE_TARGET): $(INSTALLED_RECOVERYIMAGE_TARGET) $(RECOVERY_FROM_BOOT_PATCH) + $(hide) $(call build-sec-image,$(INSTALLED_RECOVERYIMAGE_TARGET)) + 
+ifneq ($(BUILD_TINY_ANDROID),true) +ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_SEC_RECOVERYIMAGE_TARGET) +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_SEC_RECOVERYIMAGE_TARGET) +endif # !BUILD_TINY_ANDROID +endif # TARGET_BOOTIMG_SIGNED + +#---------------------------------------------------------------------- +# Generate persist image (persist.img) +#---------------------------------------------------------------------- +TARGET_OUT_PERSIST := $(PRODUCT_OUT)/persist + +INTERNAL_PERSISTIMAGE_FILES := \ + $(filter $(TARGET_OUT_PERSIST)/%,$(ALL_DEFAULT_INSTALLED_MODULES)) + +INSTALLED_PERSISTIMAGE_TARGET := $(PRODUCT_OUT)/persist.img + +define build-persistimage-target + $(call pretty,"Target persist fs image: $(INSTALLED_PERSISTIMAGE_TARGET)") + @mkdir -p $(TARGET_OUT_PERSIST) + $(hide) $(MKEXTUSERIMG) -s $(TARGET_OUT_PERSIST) $@ ext4 persist $(BOARD_PERSISTIMAGE_PARTITION_SIZE) + $(hide) chmod a+r $@ + $(hide) $(call assert-max-image-size,$@,$(BOARD_PERSISTIMAGE_PARTITION_SIZE),yaffs) +endef + +$(INSTALLED_PERSISTIMAGE_TARGET): $(MKEXTUSERIMG) $(MAKE_EXT4FS) $(INTERNAL_PERSISTIMAGE_FILES) + $(build-persistimage-target) + +ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_PERSISTIMAGE_TARGET) +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_PERSISTIMAGE_TARGET) + + +#---------------------------------------------------------------------- +# Generate device tree image (dt.img) +#---------------------------------------------------------------------- +ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) +ifeq ($(strip $(BUILD_TINY_ANDROID)),true) +include device/qcom/common/dtbtool/Android.mk +endif + +DTBTOOL := $(HOST_OUT_EXECUTABLES)/dtbTool$(HOST_EXECUTABLE_SUFFIX) + +INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img + +define build-dtimage-target + $(call pretty,"Target dt image: $(INSTALLED_DTIMAGE_TARGET)") + $(hide) $(DTBTOOL) -o $@ -s $(BOARD_KERNEL_PAGESIZE) -p $(KERNEL_OUT)/scripts/dtc/ $(KERNEL_OUT)/arch/arm/boot/ + $(hide) chmod a+r $@ +endef + 
+$(INSTALLED_DTIMAGE_TARGET): $(DTBTOOL) $(INSTALLED_KERNEL_TARGET) + $(build-dtimage-target) + +ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DTIMAGE_TARGET) +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_DTIMAGE_TARGET) +endif + + +#---------------------------------------------------------------------- +# Generate 1GB userdata image for 8930 +#---------------------------------------------------------------------- +ifeq ($(call is-board-platform-in-list,msm8960),true) + +1G_USER_OUT := $(PRODUCT_OUT)/1g_user_image +BOARD_1G_USERDATAIMAGE_PARTITION_SIZE := 5368709120 +INSTALLED_1G_USERDATAIMAGE_TARGET := $(1G_USER_OUT)/userdata.img + +define build-1g-userdataimage-target + $(call pretty,"Target 1G userdata fs image: $(INSTALLED_1G_USERDATAIMAGE_TARGET)") + @mkdir -p $(1G_USER_OUT) + $(hide) $(MKEXTUSERIMG) -s $(TARGET_OUT_DATA) $@ ext4 data $(BOARD_1G_USERDATAIMAGE_PARTITION_SIZE) + $(hide) chmod a+r $@ + $(hide) $(call assert-max-image-size,$@,$(BOARD_1G_USERDATAIMAGE_PARTITION_SIZE),yaffs) +endef + +$(INSTALLED_1G_USERDATAIMAGE_TARGET): $(INSTALLED_USERDATAIMAGE_TARGET) + $(build-1g-userdataimage-target) + +ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_1G_USERDATAIMAGE_TARGET) +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_1G_USERDATAIMAGE_TARGET) + +endif + + +#---------------------------------------------------------------------- +# Generate NAND images +#---------------------------------------------------------------------- +ifeq ($(call is-board-platform-in-list,msm7x27a msm7x30),true) + +2K_NAND_OUT := $(PRODUCT_OUT)/2k_nand_images +4K_NAND_OUT := $(PRODUCT_OUT)/4k_nand_images +BCHECC_OUT := $(PRODUCT_OUT)/bchecc_images + +INSTALLED_2K_BOOTIMAGE_TARGET := $(2K_NAND_OUT)/boot.img +INSTALLED_2K_SYSTEMIMAGE_TARGET := $(2K_NAND_OUT)/system.img +INSTALLED_2K_USERDATAIMAGE_TARGET := $(2K_NAND_OUT)/userdata.img +INSTALLED_2K_PERSISTIMAGE_TARGET := $(2K_NAND_OUT)/persist.img +INSTALLED_2K_RECOVERYIMAGE_TARGET := $(2K_NAND_OUT)/recovery.img + 
+INSTALLED_4K_BOOTIMAGE_TARGET := $(4K_NAND_OUT)/boot.img +INSTALLED_4K_SYSTEMIMAGE_TARGET := $(4K_NAND_OUT)/system.img +INSTALLED_4K_USERDATAIMAGE_TARGET := $(4K_NAND_OUT)/userdata.img +INSTALLED_4K_PERSISTIMAGE_TARGET := $(4K_NAND_OUT)/persist.img +INSTALLED_4K_RECOVERYIMAGE_TARGET := $(4K_NAND_OUT)/recovery.img + +INSTALLED_BCHECC_BOOTIMAGE_TARGET := $(BCHECC_OUT)/boot.img +INSTALLED_BCHECC_SYSTEMIMAGE_TARGET := $(BCHECC_OUT)/system.img +INSTALLED_BCHECC_USERDATAIMAGE_TARGET := $(BCHECC_OUT)/userdata.img +INSTALLED_BCHECC_PERSISTIMAGE_TARGET := $(BCHECC_OUT)/persist.img +INSTALLED_BCHECC_RECOVERYIMAGE_TARGET := $(BCHECC_OUT)/recovery.img + +recovery_nand_fstab := $(TARGET_DEVICE_DIR)/recovery_nand.fstab + +NAND_BOOTIMAGE_ARGS := \ + --kernel $(INSTALLED_KERNEL_TARGET) \ + --ramdisk $(INSTALLED_RAMDISK_TARGET) \ + --cmdline "$(BOARD_KERNEL_CMDLINE)" \ + --base $(BOARD_KERNEL_BASE) + +NAND_RECOVERYIMAGE_ARGS := \ + --kernel $(INSTALLED_KERNEL_TARGET) \ + --ramdisk $(recovery_ramdisk) \ + --cmdline "$(BOARD_KERNEL_CMDLINE)" \ + --base $(BOARD_KERNEL_BASE) + +INTERNAL_4K_BOOTIMAGE_ARGS := $(NAND_BOOTIMAGE_ARGS) +INTERNAL_4K_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE) + +INTERNAL_2K_BOOTIMAGE_ARGS := $(NAND_BOOTIMAGE_ARGS) +INTERNAL_2K_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_2KPAGESIZE) + +INTERNAL_4K_MKYAFFS2_FLAGS := -c $(BOARD_KERNEL_PAGESIZE) +INTERNAL_4K_MKYAFFS2_FLAGS += -s $(BOARD_KERNEL_SPARESIZE) + +INTERNAL_2K_MKYAFFS2_FLAGS := -c $(BOARD_KERNEL_2KPAGESIZE) +INTERNAL_2K_MKYAFFS2_FLAGS += -s $(BOARD_KERNEL_2KSPARESIZE) + +INTERNAL_BCHECC_MKYAFFS2_FLAGS := -c $(BOARD_KERNEL_PAGESIZE) +INTERNAL_BCHECC_MKYAFFS2_FLAGS += -s $(BOARD_KERNEL_BCHECC_SPARESIZE) + +INTERNAL_4K_RECOVERYIMAGE_ARGS := $(NAND_RECOVERYIMAGE_ARGS) +INTERNAL_4K_RECOVERYIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE) + +INTERNAL_2K_RECOVERYIMAGE_ARGS := $(NAND_RECOVERYIMAGE_ARGS) +INTERNAL_2K_RECOVERYIMAGE_ARGS += --pagesize $(BOARD_KERNEL_2KPAGESIZE) + +# Generate boot image 
for NAND +ifeq ($(TARGET_BOOTIMG_SIGNED),true) + +ifndef TARGET_SHA_TYPE + TARGET_SHA_TYPE := sha256 +endif + +define build-nand-bootimage + @echo "target NAND boot image: $(3)" + $(hide) mkdir -p $(1) + $(hide) $(MKBOOTIMG) $(2) --output $(3).nonsecure + $(hide) openssl dgst -$(TARGET_SHA_TYPE) -binary $(3).nonsecure > $(3).$(TARGET_SHA_TYPE) + $(hide) openssl rsautl -sign -in $(3).$(TARGET_SHA_TYPE) -inkey $(PRODUCT_PRIVATE_KEY) -out $(3).sig + $(hide) dd if=/dev/zero of=$(3).sig.padded bs=$(BOARD_KERNEL_PAGESIZE) count=1 + $(hide) dd if=$(3).sig of=$(3).sig.padded conv=notrunc + $(hide) cat $(3).nonsecure $(3).sig.padded > $(3) + $(hide) rm -rf $(3).$(TARGET_SHA_TYPE) $(3).sig $(3).sig.padded +endef +else +define build-nand-bootimage + @echo "target NAND boot image: $(3)" + $(hide) mkdir -p $(1) + $(hide) $(MKBOOTIMG) $(2) --output $(3) +endef + $(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE),raw) +endif + +# Generate system image for NAND +define build-nand-systemimage + @echo "target NAND system image: $(3)" + $(hide) mkdir -p $(1) + $(hide) $(MKYAFFS2) -f $(2) $(TARGET_OUT) $(3) + $(hide) chmod a+r $(3) + $(hide) $(call assert-max-image-size,$@,$(BOARD_SYSTEMIMAGE_PARTITION_SIZE),yaffs) +endef + +# Generate userdata image for NAND +define build-nand-userdataimage + @echo "target NAND userdata image: $(3)" + $(hide) mkdir -p $(1) + $(hide) $(MKYAFFS2) -f $(2) $(TARGET_OUT_DATA) $(3) + $(hide) chmod a+r $(3) + $(hide) $(call assert-max-image-size,$@,$(BOARD_USERDATAIMAGE_PARTITION_SIZE),yaffs) +endef + +# Generate persist image for NAND +define build-nand-persistimage + @echo "target NAND persist image: $(3)" + $(hide) mkdir -p $(1) + $(hide) $(MKYAFFS2) -f $(2) $(TARGET_OUT_PERSIST) $(3) + $(hide) chmod a+r $(3) + $(hide) $(call assert-max-image-size,$@,$(BOARD_PERSISTIMAGE_PARTITION_SIZE),yaffs) +endef + +$(INSTALLED_4K_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_BOOTIMAGE_TARGET) + $(hide) $(call 
build-nand-bootimage,$(4K_NAND_OUT),$(INTERNAL_4K_BOOTIMAGE_ARGS),$(INSTALLED_4K_BOOTIMAGE_TARGET)) +ifeq ($(call is-board-platform,msm7x27a),true) + $(hide) $(call build-nand-bootimage,$(2K_NAND_OUT),$(INTERNAL_2K_BOOTIMAGE_ARGS),$(INSTALLED_2K_BOOTIMAGE_TARGET)) + $(hide) $(call build-nand-bootimage,$(BCHECC_OUT),$(INTERNAL_4K_BOOTIMAGE_ARGS),$(INSTALLED_BCHECC_BOOTIMAGE_TARGET)) +endif # is-board-platform + +$(INSTALLED_4K_SYSTEMIMAGE_TARGET): $(MKYAFFS2) $(INSTALLED_SYSTEMIMAGE) + $(hide) $(call build-nand-systemimage,$(4K_NAND_OUT),$(INTERNAL_4K_MKYAFFS2_FLAGS),$(INSTALLED_4K_SYSTEMIMAGE_TARGET)) +ifeq ($(call is-board-platform,msm7x27a),true) + $(hide) $(call build-nand-systemimage,$(2K_NAND_OUT),$(INTERNAL_2K_MKYAFFS2_FLAGS),$(INSTALLED_2K_SYSTEMIMAGE_TARGET)) + $(hide) $(call build-nand-systemimage,$(BCHECC_OUT),$(INTERNAL_BCHECC_MKYAFFS2_FLAGS),$(INSTALLED_BCHECC_SYSTEMIMAGE_TARGET)) +endif # is-board-platform + +$(INSTALLED_4K_USERDATAIMAGE_TARGET): $(MKYAFFS2) $(INSTALLED_USERDATAIMAGE_TARGET) + $(hide) $(call build-nand-userdataimage,$(4K_NAND_OUT),$(INTERNAL_4K_MKYAFFS2_FLAGS),$(INSTALLED_4K_USERDATAIMAGE_TARGET)) +ifeq ($(call is-board-platform,msm7x27a),true) + $(hide) $(call build-nand-userdataimage,$(2K_NAND_OUT),$(INTERNAL_2K_MKYAFFS2_FLAGS),$(INSTALLED_2K_USERDATAIMAGE_TARGET)) + $(hide) $(call build-nand-userdataimage,$(BCHECC_OUT),$(INTERNAL_BCHECC_MKYAFFS2_FLAGS),$(INSTALLED_BCHECC_USERDATAIMAGE_TARGET)) +endif # is-board-platform + +$(INSTALLED_4K_PERSISTIMAGE_TARGET): $(MKYAFFS2) $(INSTALLED_PERSISTIMAGE_TARGET) + $(hide) $(call build-nand-persistimage,$(4K_NAND_OUT),$(INTERNAL_4K_MKYAFFS2_FLAGS),$(INSTALLED_4K_PERSISTIMAGE_TARGET)) +ifeq ($(call is-board-platform,msm7x27a),true) + $(hide) $(call build-nand-persistimage,$(2K_NAND_OUT),$(INTERNAL_2K_MKYAFFS2_FLAGS),$(INSTALLED_2K_PERSISTIMAGE_TARGET)) + $(hide) $(call build-nand-persistimage,$(BCHECC_OUT),$(INTERNAL_BCHECC_MKYAFFS2_FLAGS),$(INSTALLED_BCHECC_PERSISTIMAGE_TARGET)) +endif # 
is-board-platform + +$(INSTALLED_4K_RECOVERYIMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_RECOVERYIMAGE_TARGET) $(recovery_nand_fstab) + $(hide) cp -f $(recovery_nand_fstab) $(TARGET_RECOVERY_ROOT_OUT)/etc + $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) + $(hide) $(call build-nand-bootimage,$(4K_NAND_OUT),$(INTERNAL_4K_RECOVERYIMAGE_ARGS),$(INSTALLED_4K_RECOVERYIMAGE_TARGET)) +ifeq ($(call is-board-platform,msm7x27a),true) + $(hide) $(call build-nand-bootimage,$(2K_NAND_OUT),$(INTERNAL_2K_RECOVERYIMAGE_ARGS),$(INSTALLED_2K_RECOVERYIMAGE_TARGET)) + $(hide) $(call build-nand-bootimage,$(BCHECC_OUT),$(INTERNAL_4K_RECOVERYIMAGE_ARGS),$(INSTALLED_BCHECC_RECOVERYIMAGE_TARGET)) +endif # is-board-platform + +ALL_DEFAULT_INSTALLED_MODULES += \ + $(INSTALLED_4K_BOOTIMAGE_TARGET) \ + $(INSTALLED_4K_SYSTEMIMAGE_TARGET) \ + $(INSTALLED_4K_USERDATAIMAGE_TARGET) \ + $(INSTALLED_4K_PERSISTIMAGE_TARGET) + +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += \ + $(INSTALLED_4K_BOOTIMAGE_TARGET) \ + $(INSTALLED_4K_SYSTEMIMAGE_TARGET) \ + $(INSTALLED_4K_USERDATAIMAGE_TARGET) \ + $(INSTALLED_4K_PERSISTIMAGE_TARGET) + +ifneq ($(BUILD_TINY_ANDROID),true) +ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_4K_RECOVERYIMAGE_TARGET) +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_4K_RECOVERYIMAGE_TARGET) +endif # !BUILD_TINY_ANDROID + +endif # is-board-platform-in-list + +.PHONY: aboot +aboot: $(INSTALLED_BOOTLOADER_MODULE) + +.PHONY: kernel +kernel: $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_SEC_BOOTIMAGE_TARGET) $(INSTALLED_4K_BOOTIMAGE_TARGET) + +.PHONY: recoveryimage +recoveryimage: $(INSTALLED_RECOVERYIMAGE_TARGET) $(INSTALLED_SEC_RECOVERYIMAGE_TARGET) $(INSTALLED_4K_RECOVERYIMAGE_TARGET) + +.PHONY: persistimage +persistimage: $(INSTALLED_PERSISTIMAGE_TARGET) $(INSTALLED_4K_PERSISTIMAGE_TARGET) From 8b3d332eff8d9116d86f35220183c467fc252b1d Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Tue, 5 Nov 2013 22:17:59 +0000 Subject: [PATCH 039/309] slim: Fix global selinux 
policy inclusion We're adding SLIM-specific policies to an overlay instead of patching external/sepolicy, so we need to make sure it's included to prevent misbehaving systems Change-Id: I7989a1093f9abc8360d492e73a860eb4afad10ad --- core/config.mk | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/config.mk b/core/config.mk index 61d8ad79f..dc2b7e059 100644 --- a/core/config.mk +++ b/core/config.mk @@ -687,4 +687,10 @@ endif RSCOMPAT_32BIT_ONLY_API_LEVELS := 8 9 10 11 12 13 14 15 16 17 18 19 20 RSCOMPAT_NO_USAGEIO_API_LEVELS := 8 9 10 11 12 13 +ifneq ($(SLIM_BUILD),) +## We need to be sure the global selinux policies are included +## last, to avoid accidental resetting by device configs +$(eval include vendor/slim/sepolicy/sepolicy.mk) +endif + include $(BUILD_SYSTEM)/dumpvar.mk From 3c9a5d37fcb77813d9d97fb2793a2335eea62428 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Fri, 1 Nov 2013 00:47:32 +0000 Subject: [PATCH 040/309] Don't make assumptions about the formats of build descriptions Change-Id: Id83a7594e9e1b9b4ffbdbaba695506d8d0d21a46 --- tools/releasetools/sign_target_files_apks | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks index 60d62c212..4538196ea 100755 --- a/tools/releasetools/sign_target_files_apks +++ b/tools/releasetools/sign_target_files_apks @@ -318,7 +318,7 @@ def RewriteProps(data, misc_info): value = "/".join(pieces) elif key == "ro.build.description": pieces = value.split(" ") - assert len(pieces) == 5 + #assert len(pieces) == 5 pieces[-1] = EditTags(pieces[-1]) value = " ".join(pieces) elif key == "ro.build.tags": From 52a9ea283e8e54148cccc9dce2981d478084bc35 Mon Sep 17 00:00:00 2001 From: cybojenix Date: Sat, 2 Nov 2013 17:18:36 +0400 Subject: [PATCH 041/309] core: main: remove all the spam about including make files Change-Id: Ifd2ef0e239d16b169c572472416c2c31df62a367 --- core/main.mk | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/core/main.mk b/core/main.mk index f7a9557f7..f67362e1d 100644 --- a/core/main.mk +++ b/core/main.mk @@ -502,7 +502,7 @@ ifneq ($(dont_bother),true) subdir_makefiles := \ $(shell build/tools/findleaves.py $(FIND_LEAVES_EXCLUDES) $(subdirs) Android.mk) -$(foreach mk, $(subdir_makefiles), $(info including $(mk) ...)$(eval include $(mk))) +$(foreach mk, $(subdir_makefiles), $(eval include $(mk))) endif # dont_bother From 12e9a771c3e5ac8213647002b238ff5999f934a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emilio=20L=C3=B3pez?= Date: Sun, 3 Nov 2013 13:02:13 -0300 Subject: [PATCH 042/309] envsetup: do not print an error if complete fails complete is a bashism; so it will fail on other shells. Avoid printing an error if that is the case. Change-Id: Id6d6311792f409cc3a697c7a2bb003863f1afe60 --- envsetup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index 4de13229d..2cfa6693b 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -649,7 +649,7 @@ function _lunch() COMPREPLY=( $(compgen -W "${LUNCH_MENU_CHOICES[*]}" -- ${cur}) ) return 0 } -complete -F _lunch lunch +complete -F _lunch lunch 2>/dev/null # Configures the build to build unbundled apps. # Run tapas with one or more app names (from LOCAL_PACKAGE_NAME) From 678c6b8ea07b1a3be47636057be7a44237efb2cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emilio=20L=C3=B3pez?= Date: Sun, 3 Nov 2013 13:05:43 -0300 Subject: [PATCH 043/309] envsetup: Mark zsh as compatible Basic functionality has been tested on zsh 5.0.2 and has been found to work correctly. 
Change-Id: I02e5e3bedf56b43104c280d9737ae7b334357643 --- envsetup.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index 2cfa6693b..46052a1bb 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1677,8 +1677,10 @@ if [ "x$SHELL" != "x/bin/bash" ]; then case `ps -o command -p $$` in *bash*) ;; + *zsh*) + ;; *) - echo "WARNING: Only bash is supported, use of other shell would lead to erroneous results" + echo "WARNING: Only bash and zsh are supported, use of other shell may lead to erroneous results" ;; esac fi From 75afddd5a0a741421ebd96b89cd5ac5f251bcdb8 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Fri, 13 Dec 2013 12:47:42 -0500 Subject: [PATCH 044/309] build: Add support for PRODUCT_COPY_FILES_OVERRIDES * We may have a case where prebuilts need to be built from source instead based on an externalized config. * New directive takes a list of destination files to remove from PRODUCT_COPY_FILES so that other instructions can be supplied. Change-Id: I7feff16440e54d1676ffddbbd96d5947efa43ede --- core/product.mk | 1 + core/product_config.mk | 16 ++++++++++++++++ core/tasks/product-graph.mk | 1 + 3 files changed, 18 insertions(+) diff --git a/core/product.mk b/core/product.mk index 79ffec407..733b812e8 100644 --- a/core/product.mk +++ b/core/product.mk @@ -84,6 +84,7 @@ _product_var_list := \ PRODUCT_DEFAULT_PROPERTY_OVERRIDES \ PRODUCT_CHARACTERISTICS \ PRODUCT_COPY_FILES \ + PRODUCT_COPY_FILES_OVERRIDES \ PRODUCT_OTA_PUBLIC_KEYS \ PRODUCT_EXTRA_RECOVERY_KEYS \ PRODUCT_PACKAGE_OVERLAYS \ diff --git a/core/product_config.mk b/core/product_config.mk index 020143128..5f5adc9b8 100644 --- a/core/product_config.mk +++ b/core/product_config.mk @@ -349,6 +349,22 @@ PRODUCT_COPY_FILES += \ endif _boot_animation := +# We might want to skip items listed in PRODUCT_COPY_FILES for +# various reasons. This is useful for replacing a binary module with one +# built from source. 
This should be a list of destination files under $OUT +PRODUCT_COPY_FILES_OVERRIDES := \ + $(addprefix %:, $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_COPY_FILES_OVERRIDES))) + +ifneq ($(PRODUCT_COPY_FILES_OVERRIDES),) + PRODUCT_COPY_FILES := $(filter-out $(PRODUCT_COPY_FILES_OVERRIDES), $(PRODUCT_COPY_FILES)) +endif + +.PHONY: listcopies +listcopies: + @echo "Copy files: $(PRODUCT_COPY_FILES)" + @echo "Overrides: $(PRODUCT_COPY_FILES_OVERRIDES)" + + # A list of property assignments, like "key = value", with zero or more # whitespace characters on either side of the '='. PRODUCT_PROPERTY_OVERRIDES := \ diff --git a/core/tasks/product-graph.mk b/core/tasks/product-graph.mk index 9641f3f31..94a1dc7fb 100644 --- a/core/tasks/product-graph.mk +++ b/core/tasks/product-graph.mk @@ -105,6 +105,7 @@ $(OUT_DIR)/products/$(strip $(1)).txt: $(this_makefile) $(hide) echo 'PRODUCT_DEFAULT_PROPERTY_OVERRIDES=$$(PRODUCTS.$(strip $(1)).PRODUCT_DEFAULT_PROPERTY_OVERRIDES)' >> $$@ $(hide) echo 'PRODUCT_CHARACTERISTICS=$$(PRODUCTS.$(strip $(1)).PRODUCT_CHARACTERISTICS)' >> $$@ $(hide) echo 'PRODUCT_COPY_FILES=$$(PRODUCTS.$(strip $(1)).PRODUCT_COPY_FILES)' >> $$@ + $(hide) echo 'PRODUCT_COPY_FILES_OVERRIDES=$$(PRODUCTS.$(strip $(1)).PRODUCT_COPY_FILES_OVERRIDES)' >> $$@ $(hide) echo 'PRODUCT_OTA_PUBLIC_KEYS=$$(PRODUCTS.$(strip $(1)).PRODUCT_OTA_PUBLIC_KEYS)' >> $$@ $(hide) echo 'PRODUCT_EXTRA_RECOVERY_KEYS=$$(PRODUCTS.$(strip $(1)).PRODUCT_EXTRA_RECOVERY_KEYS)' >> $$@ $(hide) echo 'PRODUCT_PACKAGE_OVERLAYS=$$(PRODUCTS.$(strip $(1)).PRODUCT_PACKAGE_OVERLAYS)' >> $$@ From 8bbfa9e644856d48d12c0ebf7b50d58e6d253261 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Tue, 16 Oct 2012 19:40:18 +0530 Subject: [PATCH 045/309] dopush: improvements - use $OUT instead of replacing device name with a new line and then "tail"ing it. - adb shell stop/start if file is SystemUI.apk or framework/* - and some cleanup. 
Change-Id: I49278406dc53285b4919f7d41116ad849ec38250 Conflicts: envsetup.sh --- envsetup.sh | 114 ++++++++++++++++++++++++++++------------------------ 1 file changed, 61 insertions(+), 53 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 46052a1bb..dbeb7eb34 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -742,59 +742,6 @@ EOF return $? } -# Credit for color strip sed: http://goo.gl/BoIcm -function mmmp() -{ - if [[ $# < 1 || $1 == "--help" || $1 == "-h" ]]; then - echo "mmmp [make arguments] " - return 1 - fi - - # Get product name from cm_ - PRODUCT=`echo $TARGET_PRODUCT | tr "_" "\n" | tail -n 1` - - adb start-server # Prevent unexpected starting server message from adb get-state in the next line - if [ $(adb get-state) != device -a $(adb shell busybox test -e /sbin/recovery 2> /dev/null; echo $?) != 0 ] ; then - echo "No device is online. Waiting for one..." - echo "Please connect USB and/or enable USB debugging" - until [ $(adb get-state) = device -o $(adb shell busybox test -e /sbin/recovery 2> /dev/null; echo $?) = 0 ];do - sleep 1 - done - echo "Device Found.." - fi - - adb root &> /dev/null - sleep 0.3 - adb wait-for-device &> /dev/null - sleep 0.3 - adb remount &> /dev/null - - mmm $* | tee .log - - # Install: - LOC=$(cat .log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep 'Install' | cut -d ':' -f 2) - - # Copy: - LOC=$LOC $(cat .log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep 'Copy' | cut -d ':' -f 2) - - for FILE in $LOC; do - # Get target file name (i.e. system/bin/adb) - TARGET=$(echo $FILE | sed "s/\/$PRODUCT\//\n/" | tail -n 1) - - # Don't send files that are not in /system. - if ! echo $TARGET | egrep '^system\/' > /dev/null ; then - continue - else - echo "Pushing: $TARGET" - adb push $FILE $TARGET - fi - done - rm -f .log - return 0 -} - -alias mmp='mmmp .' 
- function gettop { local TOPFILE=build/core/envsetup.mk @@ -1585,6 +1532,67 @@ function repopick() { $T/build/tools/repopick.py $@ } +# Credit for color strip sed: http://goo.gl/BoIcm +function dopush() +{ + local func=$1 + shift + + adb start-server # Prevent unexpected starting server message from adb get-state in the next line + if [ $(adb get-state) != device -a $(adb shell busybox test -e /sbin/recovery 2> /dev/null; echo $?) != 0 ] ; then + echo "No device is online. Waiting for one..." + echo "Please connect USB and/or enable USB debugging" + until [ $(adb get-state) = device -o $(adb shell busybox test -e /sbin/recovery 2> /dev/null; echo $?) = 0 ];do + sleep 1 + done + echo "Device Found." + fi + + adb root &> /dev/null + sleep 0.3 + adb wait-for-device &> /dev/null + sleep 0.3 + adb remount &> /dev/null + + $func $* | tee $OUT/.log + + # Install: + LOC=$(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep 'Install' | cut -d ':' -f 2) + + # Copy: + LOC=$LOC $(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep 'Copy' | cut -d ':' -f 2) + + for FILE in $LOC; do + # Get target file name (i.e. system/bin/adb) + TARGET=$(echo $FILE | sed "s#$OUT/##") + + # Don't send files that are not in /system. + if ! echo $TARGET | egrep '^system\/' > /dev/null ; then + continue + else + case $TARGET in + system/app/SystemUI.apk|system/framework/*) + stop_n_start=true + ;; + *) + stop_n_start=false + ;; + esac + if $stop_n_start ; then adb shell stop ; fi + echo "Pushing: $TARGET" + adb push $FILE $TARGET + if $stop_n_start ; then adb shell start ; fi + fi + done + rm -f $OUT/.log + return 0 +} + +alias mmp='dopush mm' +alias mmmp='dopush mmm' +alias mkap='dopush mka' +alias cmkap='dopush cmka' + # Force JAVA_HOME to point to java 1.7 if it isn't already set. # # Note that the MacOS path for java 1.7 includes a minor revision number (sigh). 
From 4fedf13ae1cf0c83463ff572429dde0739fe5990 Mon Sep 17 00:00:00 2001 From: Nebojsa Cvetkovic Date: Fri, 9 Nov 2012 23:02:54 +0000 Subject: [PATCH 046/309] omnom (brunch + eat) command Change-Id: I524e5a1450de7ea9c93512eebcb10f320e68fa82 --- envsetup.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index dbeb7eb34..cac5052d4 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -523,6 +523,12 @@ function brunch() return $? } +function omnom +{ + brunch $* + eat +} + function breakfast() { target=$1 From c88d4bc985068c8cdfd6c6bebf79240921a50abc Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sun, 23 Sep 2012 23:46:55 -0700 Subject: [PATCH 047/309] build: Add "installboot" command to install boot images * For devices that lack fastboot, this command will copy the boot image to a running device, write it out to the correct partition with dd, copy the modules and set the correct permissions. * Field surveys have discovered that a specific CM device maintainer is handling two devices, one with boot partition p7 and one at p8. * Extensive research has unveiled the fact that mixing up these partitions across these two specific devices will cause corruption of the TZ firmware, which cannot be restored and results in an unrecoverable brick. * Automate the process so this idiot (me) stops breaking shit. Change-Id: I0dc5449daf128181e2e349ea26ad5741cc87bfe7 --- envsetup.sh | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index cac5052d4..17e398a4f 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1538,6 +1538,46 @@ function repopick() { $T/build/tools/repopick.py $@ } +function installboot() +{ + if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ]; + then + echo "No recovery.fstab found. Build recovery first." + return 1 + fi + if [ ! -e "$OUT/boot.img" ]; + then + echo "No boot.img found. Run make bootimage first." 
+ return 1 + fi + PARTITION=`grep "^\/boot" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}` + if [ -z "$PARTITION" ]; + then + echo "Unable to determine boot partition." + return 1 + fi + adb start-server + adb root + sleep 1 + adb wait-for-device + adb remount + adb wait-for-device + if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$CM_BUILD"); + then + adb push $OUT/boot.img /cache/ + for i in $OUT/system/lib/modules/*; + do + adb push $i /system/lib/modules/ + done + adb shell dd if=/cache/boot.img of=$PARTITION + adb shell chmod 644 /system/lib/modules/* + echo "Installation complete." + else + echo "The connected device does not appear to be $CM_BUILD, run away!" + fi +} + + # Credit for color strip sed: http://goo.gl/BoIcm function dopush() { From 0db8521e86324f939f27bed7864f74b9600bf398 Mon Sep 17 00:00:00 2001 From: Andrew Sutherland Date: Fri, 18 Nov 2011 00:45:55 -0600 Subject: [PATCH 048/309] envsetup: cmremote,cmgerrit,cmrebase,mka,reposync Add the cm specific functions (cmremote, cmgerrit, cmrebase, mka, reposync) that are present in the gingerbread branch Change-Id: If28a5a206b8bb9ead5242c60f6c1e6af27141b57 --- envsetup.sh | 324 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 324 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 17e398a4f..71889c6e1 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -22,6 +22,11 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - sepgrep: Greps on all local sepolicy files. - sgrep: Greps on all local source files. - godir: Go to the directory containing a file. +- cmremote: Add git remote for CM Gerrit Review +- cmgerrit: A Git wrapper that fetches/pushes patch from/to CM Gerrit Review +- cmrebase: Rebase a Gerrit change and push it again +- mka: Builds using SCHED_BATCH on all processors +- reposync: Parallel repo sync using ionice and SCHED_BATCH Environemnt options: - SANITIZE_HOST: Set to 'true' to use ASAN for all host modules. 
Note that @@ -1533,6 +1538,325 @@ function godir () { \cd $T/$pathname } +function cmremote() +{ + git remote rm cmremote 2> /dev/null + if [ ! -d .git ] + then + echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up. + fi + GERRIT_REMOTE=$(cat .git/config | grep git://github.com | awk '{ print $NF }' | sed s#git://github.com/##g) + if [ -z "$GERRIT_REMOTE" ] + then + echo Unable to set up the git remote, are you in the root of the repo? + return 0 + fi + CMUSER=`git config --get review.review.cyanogenmod.com.username` + if [ -z "$CMUSER" ] + then + git remote add cmremote ssh://review.cyanogenmod.com:29418/$GERRIT_REMOTE + else + git remote add cmremote ssh://$CMUSER@review.cyanogenmod.com:29418/$GERRIT_REMOTE + fi + echo You can now push to "cmremote". +} + +function cmgerrit() { + if [ $# -eq 0 ]; then + $FUNCNAME help + return 1 + fi + local user=`git config --get review.review.cyanogenmod.com.username` + local review=`git config --get remote.github.review` + local project=`git config --get remote.github.projectname` + local command=$1 + shift + case $command in + help) + if [ $# -eq 0 ]; then + cat <&2 "Gerrit username not found." + return 1 + fi + local local_branch remote_branch + case $1 in + *:*) + local_branch=${1%:*} + remote_branch=${1##*:} + ;; + *) + local_branch=HEAD + remote_branch=$1 + ;; + esac + shift + git push $@ ssh://$user@$review:29418/$project \ + $local_branch:refs/for/$remote_branch || return 1 + ;; + changes|for) + if [ "$FUNCNAME" = "cmgerrit" ]; then + echo >&2 "'$FUNCNAME $command' is deprecated." + fi + ;; + __cmg_err_no_arg) + if [ $# -lt 2 ]; then + echo >&2 "'$FUNCNAME $command' missing argument." + elif [ $2 -eq 0 ]; then + if [ -n "$3" ]; then + $FUNCNAME help $1 + else + echo >&2 "'$FUNCNAME $1' missing argument." 
+ fi + else + return 1 + fi + ;; + __cmg_err_not_repo) + if [ -z "$review" -o -z "$project" ]; then + echo >&2 "Not currently in any reviewable repository." + else + return 1 + fi + ;; + __cmg_err_not_supported) + $FUNCNAME __cmg_err_no_arg $command $# && return + case $1 in + #TODO: filter more git commands that don't use refname + init|add|rm|mv|status|clone|remote|bisect|config|stash) + echo >&2 "'$FUNCNAME $1' is not supported." + ;; + *) return 1 ;; + esac + ;; + #TODO: other special cases? + *) + $FUNCNAME __cmg_err_not_supported $command && return 1 + $FUNCNAME __cmg_err_no_arg $command $# help && return 1 + $FUNCNAME __cmg_err_not_repo && return 1 + local args="$@" + local change pre_args refs_arg post_args + case "$args" in + *--\ *) + pre_args=${args%%-- *} + post_args="-- ${args#*-- }" + ;; + *) pre_args="$args" ;; + esac + args=($pre_args) + pre_args= + if [ ${#args[@]} -gt 0 ]; then + change=${args[${#args[@]}-1]} + fi + if [ ${#args[@]} -gt 1 ]; then + pre_args=${args[0]} + for ((i=1; i<${#args[@]}-1; i++)); do + pre_args="$pre_args ${args[$i]}" + done + fi + while ((1)); do + case $change in + ""|--) + $FUNCNAME help $command + return 1 + ;; + *@*) + if [ -z "$refs_arg" ]; then + refs_arg="@${change#*@}" + change=${change%%@*} + fi + ;; + *~*) + if [ -z "$refs_arg" ]; then + refs_arg="~${change#*~}" + change=${change%%~*} + fi + ;; + *^*) + if [ -z "$refs_arg" ]; then + refs_arg="^${change#*^}" + change=${change%%^*} + fi + ;; + *:*) + if [ -z "$refs_arg" ]; then + refs_arg=":${change#*:}" + change=${change%%:*} + fi + ;; + *) break ;; + esac + done + $FUNCNAME fetch $change \ + && git $command $pre_args FETCH_HEAD$refs_arg $post_args \ + || return 1 + ;; + esac +} + +function cmrebase() { + local repo=$1 + local refs=$2 + local pwd="$(pwd)" + local dir="$(gettop)/$repo" + + if [ -z $repo ] || [ -z $refs ]; then + echo "CyanogenMod Gerrit Rebase Usage: " + echo " cmrebase " + echo " The patch IDs appear on the Gerrit commands that are offered." 
+ echo " They consist on a series of numbers and slashes, after the text" + echo " refs/changes. For example, the ID in the following command is 26/8126/2" + echo "" + echo " git[...]ges_apps_Camera refs/changes/26/8126/2 && git cherry-pick FETCH_HEAD" + echo "" + return + fi + + if [ ! -d $dir ]; then + echo "Directory $dir doesn't exist in tree." + return + fi + cd $dir + repo=$(cat .git/config | grep git://github.com | awk '{ print $NF }' | sed s#git://github.com/##g) + echo "Starting branch..." + repo start tmprebase . + echo "Bringing it up to date..." + repo sync . + echo "Fetching change..." + git fetch "http://review.cyanogenmod.com/p/$repo" "refs/changes/$refs" && git cherry-pick FETCH_HEAD + if [ "$?" != "0" ]; then + echo "Error cherry-picking. Not uploading!" + return + fi + echo "Uploading..." + repo upload . + echo "Cleaning up..." + repo abandon tmprebase . + cd $pwd +} + +function mka() { + case `uname -s` in + Darwin) + make -j `sysctl hw.ncpu|cut -d" " -f2` "$@" + ;; + *) + schedtool -B -n 1 -e ionice -n 1 make -j `cat /proc/cpuinfo | grep "^processor" | wc -l` "$@" + ;; + esac +} + +function reposync() { + case `uname -s` in + Darwin) + repo sync -j 4 "$@" + ;; + *) + schedtool -B -n 1 -e ionice -n 1 repo sync -j 4 "$@" + ;; + esac +} + function repopick() { T=$(gettop) $T/build/tools/repopick.py $@ From f9c77d485bcedf4a453e944776062efcebb144a8 Mon Sep 17 00:00:00 2001 From: Alan Orth Date: Fri, 7 Sep 2012 11:44:27 +0300 Subject: [PATCH 049/309] envsetup.sh: Fix the `reposync` function It seems ionice can't find `repo` if it's in ~/bin, even if ~/bin is in the user's $PATH. Placing repo into /usr/bin or /usr/local/ bin works, but is undesirable (build instructions from both AOSP and CM tell users to put repo into ~/bin, and repo likes to update itself from time to time). This forces the reposync function to use the full path to the repo binary. 
Change-Id: I9dc4a3d4ec3f39563e3a84de7321189700079c2e --- envsetup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index 71889c6e1..159f7d45c 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1852,7 +1852,7 @@ function reposync() { repo sync -j 4 "$@" ;; *) - schedtool -B -n 1 -e ionice -n 1 repo sync -j 4 "$@" + schedtool -B -n 1 -e ionice -n 1 `which repo` sync -j 4 "$@" ;; esac } From 17b1d09d0d7713ba2c2a5adf977e9f21600e995b Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Wed, 24 Oct 2012 16:40:42 -0700 Subject: [PATCH 050/309] build: Add "installrecovery" command * Similar to "installboot", add an "installrecovery" command which writes the recovery image to the correct partition of a running device. Change-Id: I1dcca44fd0d8afa08ece9e99cd914547acb99c83 --- envsetup.sh | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 159f7d45c..ddeb0ba53 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -27,6 +27,9 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - cmrebase: Rebase a Gerrit change and push it again - mka: Builds using SCHED_BATCH on all processors - reposync: Parallel repo sync using ionice and SCHED_BATCH +- installboot: Installs a boot.img to the connected device. +- installrecovery: Installs a recovery.img to the connected device. + Environemnt options: - SANITIZE_HOST: Set to 'true' to use ASAN for all host modules. Note that @@ -1901,6 +1904,39 @@ function installboot() fi } +function installrecovery() +{ + if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ]; + then + echo "No recovery.fstab found. Build recovery first." + return 1 + fi + if [ ! -e "$OUT/recovery.img" ]; + then + echo "No recovery.img found. Run make recoveryimage first." 
+ return 1 + fi + PARTITION=`grep "^\/recovery" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}` + if [ -z "$PARTITION" ]; + then + echo "Unable to determine recovery partition." + return 1 + fi + adb start-server + adb root + sleep 1 + adb wait-for-device + adb remount + adb wait-for-device + if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$CM_BUILD"); + then + adb push $OUT/recovery.img /cache/ + adb shell dd if=/cache/recovery.img of=$PARTITION + echo "Installation complete." + else + echo "The connected device does not appear to be $CM_BUILD, run away!" + fi +} # Credit for color strip sed: http://goo.gl/BoIcm function dopush() From f3caa1be9f2c0af55b241465bcc79e8569d2c4fa Mon Sep 17 00:00:00 2001 From: Arnav Gupta Date: Fri, 23 Nov 2012 10:47:44 -0700 Subject: [PATCH 051/309] mka: change how make is called in some environments make -j N xxx results in compiling target 'N' instead of 'xxx' make -jN xxx will always make 'xxx' with N number of threads Signed-off-by: Arnav Gupta Change-Id: I77f17c9286a5ef1d61163d8f978900fb4c24e2e7 --- envsetup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index ddeb0ba53..4b1b7aa86 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1844,7 +1844,7 @@ function mka() { make -j `sysctl hw.ncpu|cut -d" " -f2` "$@" ;; *) - schedtool -B -n 1 -e ionice -n 1 make -j `cat /proc/cpuinfo | grep "^processor" | wc -l` "$@" + schedtool -B -n 1 -e ionice -n 1 make -j$(cat /proc/cpuinfo | grep "^processor" | wc -l) "$@" ;; esac } From 8486dd204ae3497afa87972eb5af46ed5f58baf9 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Thu, 27 Dec 2012 15:28:34 -0800 Subject: [PATCH 052/309] envsetup: Don't tread on my $PROMPT_COMMAND * I want my own PROMPT_COMMAND to coexist with Android's, so stash it in the environment and add it to the new command. 
Change-Id: I200902f135d0c5c620a8eb3ce9cefdba318cf9d3 envsetup: Fix $PROMPT_COMMAND Change I200902f135d0c5c620a8eb3ce9cefdba318cf9d3 broke the android PROMPT_COMMAND on scenarios where the existing prompt already had a hardstatus adjustment, since appending it to Android's just clobbered Android's in favor of the original. So instead of appending them, inject android's at the beggining of the existing one (and create a default if none exists) Change-Id: I1fbcbfdb6220d886dda3662da498d1759a4c195b envsetup: Really fix PROMPT_COMMAND handling * Add ANDROID_NO_PROMPT_COMMAND to not change it at all. * Export ANDROID_PROMPT_PREFIX so it can be used elsewhere. Change-Id: Ib2a487404d3b8a367a21582e8a0ce69c2d212a9a envsetup: More PROMPT_COMMAND * Get rid of ANDROID_NO_PROMPT_COMMAND, the pre-existing STAY_OFF_MY_LAWN does the same thing * Remove any pre-existing android prefixes to avoid accumulation scenarios in consecutive builds (i.e., multiple build identification strings in hardstatus) Change-Id: I86661e066e18ea6ad0c742fa1dc6555780fa5441 envsetup: add quotes around PROMPT_COMMAND Better compatibility for zsh. Change-Id: Ie010c30580e67e3b5357e3cc869114a525762677 --- envsetup.sh | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 4b1b7aa86..68c1d2876 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -273,11 +273,26 @@ function settitle() local product=$TARGET_PRODUCT local variant=$TARGET_BUILD_VARIANT local apps=$TARGET_BUILD_APPS + if [ -z "$PROMPT_COMMAND" ]; then + # No prompts + PROMPT_COMMAND="echo -ne \"\033]0;${USER}@${HOSTNAME}: ${PWD}\007\"" + elif [ -z "$(echo $PROMPT_COMMAND | grep '033]0;')" ]; then + # Prompts exist, but no hardstatus + PROMPT_COMMAND="echo -ne \"\033]0;${USER}@${HOSTNAME}: ${PWD}\007\";${PROMPT_COMMAND}" + fi + if [ ! 
-z "$ANDROID_PROMPT_PREFIX" ]; then + PROMPT_COMMAND=$(echo $PROMPT_COMMAND | sed -e 's/$ANDROID_PROMPT_PREFIX //g') + fi + if [ -z "$apps" ]; then - export PROMPT_COMMAND="echo -ne \"\033]0;[${arch}-${product}-${variant}] ${USER}@${HOSTNAME}: ${PWD}\007\"" + ANDROID_PROMPT_PREFIX="[${arch}-${product}-${variant}]" else - export PROMPT_COMMAND="echo -ne \"\033]0;[$arch $apps $variant] ${USER}@${HOSTNAME}: ${PWD}\007\"" + ANDROID_PROMPT_PREFIX="[$arch $apps $variant]" fi + export ANDROID_PROMPT_PREFIX + + # Inject build data into hardstatus + export PROMPT_COMMAND="$(echo $PROMPT_COMMAND | sed -e 's/\\033]0;\(.*\)\\007/\\033]0;$ANDROID_PROMPT_PREFIX \1\\007/g')" fi } From 3bb71fbb07042dd0a24c56071bb9a25a5864b3c3 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sat, 13 Apr 2013 13:23:35 -0700 Subject: [PATCH 053/309] envsetup: Make installboot work in recovery mode too * Needs a change to adb to allow waiting for recovery Change-Id: Ia90645513f46bcb5f20b7c74c55ef15842710d3d --- envsetup.sh | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 68c1d2876..cceb73551 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1901,10 +1901,9 @@ function installboot() adb start-server adb root sleep 1 - adb wait-for-device - adb remount - adb wait-for-device - if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$CM_BUILD"); + adb wait-for-online shell mount /system 2>&1 > /dev/null + adb wait-for-online remount + if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD"); then adb push $OUT/boot.img /cache/ for i in $OUT/system/lib/modules/*; @@ -1915,7 +1914,7 @@ function installboot() adb shell chmod 644 /system/lib/modules/* echo "Installation complete." else - echo "The connected device does not appear to be $CM_BUILD, run away!" + echo "The connected device does not appear to be $SLIM_BUILD, run away!" 
fi } @@ -1940,16 +1939,15 @@ function installrecovery() adb start-server adb root sleep 1 - adb wait-for-device - adb remount - adb wait-for-device - if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$CM_BUILD"); + adb wait-for-online shell mount /system 2>&1 > /dev/null + adb wait-for-online remount + if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD"); then adb push $OUT/recovery.img /cache/ adb shell dd if=/cache/recovery.img of=$PARTITION echo "Installation complete." else - echo "The connected device does not appear to be $CM_BUILD, run away!" + echo "The connected device does not appear to be $SLIM_BUILD, run away!" fi } From 821cdbbd7dccecc4fdc8f0aa43fde104f4fc7dec Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Sun, 19 May 2013 19:54:32 +0530 Subject: [PATCH 054/309] dopush: push files to /data too Change-Id: I85e24575a04955ae17b978f038edc46e1e6bfa2c --- envsetup.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index cceb73551..faec09e36 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1985,8 +1985,9 @@ function dopush() # Get target file name (i.e. system/bin/adb) TARGET=$(echo $FILE | sed "s#$OUT/##") - # Don't send files that are not in /system. - if ! echo $TARGET | egrep '^system\/' > /dev/null ; then + # Don't send files that are not under /system or /data + if [ ! 
"echo $TARGET | egrep '^system\/' > /dev/null" -o \ + "echo $TARGET | egrep '^data\/' > /dev/null" ] ; then continue else case $TARGET in From 8f4f177e4aa8d18361b0ca3567dd6dfb330f3742 Mon Sep 17 00:00:00 2001 From: Christopher Lais Date: Mon, 3 Jun 2013 15:15:39 -0500 Subject: [PATCH 055/309] envsetup.sh: add missing dquotes Change-Id: Ic86a5dd36b0f32cd7dc4d2f9c187566398796ed6 --- envsetup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index faec09e36..460a3bf3d 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -281,7 +281,7 @@ function settitle() PROMPT_COMMAND="echo -ne \"\033]0;${USER}@${HOSTNAME}: ${PWD}\007\";${PROMPT_COMMAND}" fi if [ ! -z "$ANDROID_PROMPT_PREFIX" ]; then - PROMPT_COMMAND=$(echo $PROMPT_COMMAND | sed -e 's/$ANDROID_PROMPT_PREFIX //g') + PROMPT_COMMAND="$(echo $PROMPT_COMMAND | sed -e 's/$ANDROID_PROMPT_PREFIX //g')" fi if [ -z "$apps" ]; then From 4149a4f2f7dfa534aeb622153e9c11957e13e477 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Sun, 30 Jun 2013 10:04:25 +0530 Subject: [PATCH 056/309] envsetup: add a helper to fix "out/target/common" not being really common * out/target/common isn't really common, and if you do back to back builds for multiple devices, that is noticable. * Use out/target/common-$device instead, and link the appropriate dir to out/target/common every time lunch() is run, if CM_FIXUP_COMMON_OUT is set. * Refer https://groups.google.com/forum/#!topic/android-building/ispbOgzoyg8 for more info. 
Change-Id: I11e7df0e68e2a60ce32576f06397d60fc9465b60 Conflicts: envsetup.sh --- envsetup.sh | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 460a3bf3d..d00c52e7c 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -663,6 +663,8 @@ function lunch() echo + fixup_common_out_dir + set_stuff_for_environment printconfig } @@ -1880,6 +1882,24 @@ function repopick() { $T/build/tools/repopick.py $@ } +function fixup_common_out_dir() { + common_out_dir=$(get_build_var OUT_DIR)/target/common + target_device=$(get_build_var TARGET_DEVICE) + if [ ! -z $SLIM_FIXUP_COMMON_OUT ]; then + if [ -d ${common_out_dir} ] && [ ! -L ${common_out_dir} ]; then + mv ${common_out_dir} ${common_out_dir}-${target_device} + ln -s ${common_out_dir}-${target_device} ${common_out_dir} + else + [ -L ${common_out_dir} ] && rm ${common_out_dir} + mkdir -p ${common_out_dir}-${target_device} + ln -s ${common_out_dir}-${target_device} ${common_out_dir} + fi + else + [ -L ${common_out_dir} ] && rm ${common_out_dir} + mkdir -p ${common_out_dir} + fi +} + function installboot() { if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ]; From bc16bfea01ab3aa8154e112c1ffc0116a78968f1 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Sat, 27 Jul 2013 13:51:56 +0100 Subject: [PATCH 057/309] eat: Support v2 fstabs Change-Id: I5220d7e9232174e2acbdc2d6c931827af959f212 Conflicts: envsetup.sh --- envsetup.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index d00c52e7c..fc6970298 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1915,8 +1915,14 @@ function installboot() PARTITION=`grep "^\/boot" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}` if [ -z "$PARTITION" ]; then - echo "Unable to determine boot partition." 
- return 1 + # Try for RECOVERY_FSTAB_VERSION = 2 + PARTITION=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $1'}` + PARTITION_TYPE=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}` + if [ -z "$PARTITION" ]; + then + echo "Unable to determine boot partition." + return 1 + fi fi adb start-server adb root From 862ce938dbbb06058352ee9a5b139078cb1b685b Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Fri, 9 Aug 2013 21:03:42 -0700 Subject: [PATCH 058/309] envsetup: Fix installrecovery for new fstab format Change-Id: I26ab441c28350cce70bb976b249b16c6b83ab8da --- envsetup.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index fc6970298..6fe176fb9 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1959,8 +1959,14 @@ function installrecovery() PARTITION=`grep "^\/recovery" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}` if [ -z "$PARTITION" ]; then - echo "Unable to determine recovery partition." - return 1 + # Try for RECOVERY_FSTAB_VERSION = 2 + PARTITION=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $1'}` + PARTITION_TYPE=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}` + if [ -z "$PARTITION" ]; + then + echo "Unable to determine recovery partition." 
+ return 1 + fi fi adb start-server adb root From 320c065a6b09e5a2e9b774ef7f5f6ca7d1990399 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sat, 10 Aug 2013 21:02:09 -0700 Subject: [PATCH 059/309] envsetup: Fix giant derp Change-Id: I490556f18c40c3614e4e554d7db3306a16ff99d8 --- envsetup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 6fe176fb9..b3c3e0341 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1960,8 +1960,8 @@ function installrecovery() if [ -z "$PARTITION" ]; then # Try for RECOVERY_FSTAB_VERSION = 2 - PARTITION=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $1'}` - PARTITION_TYPE=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}` + PARTITION=`grep "[[:space:]]\/recovery[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $1'}` + PARTITION_TYPE=`grep "[[:space:]]\/recovery[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}` if [ -z "$PARTITION" ]; then echo "Unable to determine recovery partition." From 6d5350c25d08e961f18290c7a5c6a49110e5ab65 Mon Sep 17 00:00:00 2001 From: Sam Mortimer Date: Sat, 10 Aug 2013 22:57:08 -0700 Subject: [PATCH 060/309] build: allow dopush to work properly with network adb Change-Id: I9efe60814d66dc8eb17544e6c2b94473dc903133 Conflicts: envsetup.sh --- envsetup.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index b3c3e0341..262465289 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1999,8 +1999,18 @@ function dopush() echo "Device Found." fi + # retrieve IP and PORT info if we're using a TCP connection + TCPIPPORT=$(adb devices | egrep '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:[0-9]+[^0-9]+' \ + | head -1 | awk '{print $1}') adb root &> /dev/null sleep 0.3 + if [ -n "$TCPIPPORT" ] + then + # adb root just killed our connection + # so reconnect... 
+ adb connect "$TCPIPPORT" + fi + adb wait-for-device &> /dev/null sleep 0.3 adb remount &> /dev/null From 7c8aca21a79cbca9252605d4800196de1884c9f7 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Fri, 30 Aug 2013 17:34:37 -0700 Subject: [PATCH 061/309] envsetup: Wait for device in installboot/installrecovery * Useful when you're racing against a bug that causes a kernel panic shortly after USB turns on :( Change-Id: I2a503e4b195e632ed5b67a77b942e80c47d0ff45 envsetup: Use wait-for-online so these cmds work in recovery Change-Id: I7a85e7068f373b1d726aa6b27a51862fa9b7c6fd Conflicts: envsetup.sh --- envsetup.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 262465289..a6a7b5b67 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1925,6 +1925,7 @@ function installboot() fi fi adb start-server + adb wait-for-online adb root sleep 1 adb wait-for-online shell mount /system 2>&1 > /dev/null @@ -1969,6 +1970,7 @@ function installrecovery() fi fi adb start-server + adb wait-for-online adb root sleep 1 adb wait-for-online shell mount /system 2>&1 > /dev/null From ff58f4774dadc3371b66b930228c73ae6aedc1ad Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Thu, 26 Sep 2013 00:10:20 +0100 Subject: [PATCH 062/309] Add a unique build number to every build SDK-readable via android.os.Build.INCREMENTAL Change-Id: If9885adebe12a56b2b2e353285721be695bc411b Fix exporting the unique build identifier Change-Id: Ie58e981b7e83a4351d137dfd8995dce7d365d950 Fix incremental build number on Darwin Darwin does not include sha1sum, but it should have openssl. 
Change-Id: If2b685c1478f9486bed21dfba0c655d9e02db72b Use openssl instead of sha1sum Change-Id: Iab8a74d4af6646104cb23312853f811a38bdec0d Conflicts: envsetup.sh --- envsetup.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/envsetup.sh b/envsetup.sh index a6a7b5b67..e94141d16 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -82,6 +82,7 @@ function check_product() if (echo -n $1 | grep -q -e "^slim_") ; then SLIM_BUILD=$(echo -n $1 | sed -e 's/^slim_//g') + export BUILD_NUMBER=$((date +%s%N ; echo $SLIM_BUILD; hostname) | openssl sha1 | sed -e 's/.*=//g; s/ //g' | cut -c1-10) else SLIM_BUILD= fi From 5d74e63e7e2e04ed38e9767f58814cc4dff5370e Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Tue, 18 Jun 2013 14:17:21 -0700 Subject: [PATCH 063/309] add adb to the disttools Change-Id: I898fa5c9396606144e57dba8453f476b9e9440ab --- core/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/core/Makefile b/core/Makefile index 20c63dff9..4dd30316c 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1374,6 +1374,7 @@ endif # host tools needed to build dist and OTA packages DISTTOOLS := $(HOST_OUT_EXECUTABLES)/minigzip \ + $(HOST_OUT_EXECUTABLES)/adb \ $(HOST_OUT_EXECUTABLES)/mkbootfs \ $(HOST_OUT_EXECUTABLES)/mkbootimg \ $(HOST_OUT_EXECUTABLES)/fs_config \ From 525f0296b59ec62b5fa639d99d11560d705a09b8 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Mon, 1 Jul 2013 23:35:26 +0100 Subject: [PATCH 064/309] Sign final packages with a different key if requested ...and throw it into recovery builds as well Change-Id: Ic96d4d49d821cb03d5318e3e9ad93d02fb92573e --- core/Makefile | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core/Makefile b/core/Makefile index 4dd30316c..aea71751c 100644 --- a/core/Makefile +++ b/core/Makefile @@ -389,6 +389,9 @@ endif # exist with the suffixes ".x509.pem" and ".pk8". 
DEFAULT_KEY_CERT_PAIR := $(DEFAULT_SYSTEM_DEV_CERTIFICATE) +ifneq ($(OTA_PACKAGE_SIGNING_KEY),) + DEFAULT_KEY_CERT_PAIR := $(OTA_PACKAGE_SIGNING_KEY) +endif # Rules that need to be present for the all targets, even # if they don't do anything. @@ -866,6 +869,11 @@ endif # substitute other keys for this one. OTA_PUBLIC_KEYS := $(DEFAULT_SYSTEM_DEV_CERTIFICATE).x509.pem +ifneq ($(OTA_PACKAGE_SIGNING_KEY),) + OTA_PUBLIC_KEYS := $(OTA_PACKAGE_SIGNING_KEY).x509.pem + PRODUCT_EXTRA_RECOVERY_KEYS := $(DEFAULT_SYSTEM_DEV_CERTIFICATE) +endif + # Generate a file containing the keys that will be read by the # recovery binary. RECOVERY_INSTALL_OTA_KEYS := \ From 262068308b3dd273a925945bae98418ed3f12a81 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Mon, 7 Oct 2013 02:34:47 -0700 Subject: [PATCH 065/309] build: Don't run backuptool on GMS builds Change-Id: I5dde27f9d16b88049171db9805221d92e67f3e5d --- core/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/Makefile b/core/Makefile index aea71751c..5ab0c0fc8 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1661,11 +1661,15 @@ else OTA_FROM_TARGET_SCRIPT := $(TARGET_RELEASETOOL_OTA_FROM_TARGET_SCRIPT) endif +ifeq ($(WITH_GMS),true) + $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := false +else ifneq ($(SLIM_BUILD),) $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := true else $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := false endif +endif ifeq ($(TARGET_OTA_ASSERT_DEVICE),) $(INTERNAL_OTA_PACKAGE_TARGET): override_device := auto From ced94135bc6c1e0cb7d2b06ddae61c2a7f64b304 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Fri, 18 Oct 2013 21:48:46 +0100 Subject: [PATCH 066/309] Store the path to the ota file generator where it can be reused Change-Id: I8896713c79a751b79fbbcc75f6ba30dad4ea9fac --- core/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/core/Makefile b/core/Makefile index 5ab0c0fc8..2bb7a2b6d 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1678,6 +1678,7 @@ else endif 
$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) + @echo "$(OTA_FROM_TARGET_SCRIPT)" > $(PRODUCT_OUT)/ota_script_path @echo -e ${CL_YLW}"Package OTA:"${CL_RST}" $@" $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ $(OTA_FROM_TARGET_SCRIPT) -v \ From 119fa2b721677263496008f68fdb794f52495fbe Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sat, 26 Oct 2013 12:58:57 -0700 Subject: [PATCH 067/309] build: Add support for vendor product and device name * CM's build system assumes some convention-over-configuration which we might need to override in order to conform to vendor requirements. * Allow overriding of target and product names using these new variables. This allows for CM conventions to work, while generating the correct fingerprint dynamically without post processing the strings. Change-Id: Ic5913e587400dd38c04aa05de3a7becced77a59e Conflicts: core/Makefile --- core/Makefile | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/core/Makefile b/core/Makefile index 2bb7a2b6d..11f7c4f9b 100644 --- a/core/Makefile +++ b/core/Makefile @@ -124,8 +124,18 @@ endif BUILD_VERSION_TAGS += $(BUILD_KEYS) BUILD_VERSION_TAGS := $(subst $(space),$(comma),$(sort $(BUILD_VERSION_TAGS))) +# If the final fingerprint should be different than what was used by the build system, +# we can allow that too. +ifeq ($(TARGET_VENDOR_PRODUCT_NAME),) +TARGET_VENDOR_PRODUCT_NAME := $(TARGET_PRODUCT) +endif + +ifeq ($(TARGET_VENDOR_DEVICE_NAME),) +TARGET_VENDOR_DEVICE_NAME := $(TARGET_DEVICE) +endif + # A human-readable string that descibes this build in detail. 
-build_desc := $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT) $(PLATFORM_VERSION) $(BUILD_ID) $(BUILD_NUMBER) $(BUILD_VERSION_TAGS) +build_desc := $(TARGET_VENDOR_PRODUCT_NAME)-$(TARGET_BUILD_VARIANT) $(PLATFORM_VERSION) $(BUILD_ID) $(BUILD_NUMBER) $(BUILD_VERSION_TAGS) $(intermediate_system_build_prop): PRIVATE_BUILD_DESC := $(build_desc) # The string used to uniquely identify the combined build and product; used by the OTA server. @@ -137,7 +147,7 @@ ifeq (,$(strip $(BUILD_FINGERPRINT))) else BF_BUILD_NUMBER := $(BUILD_NUMBER) endif - BUILD_FINGERPRINT := $(PRODUCT_BRAND)/$(TARGET_PRODUCT)/$(TARGET_DEVICE):$(PLATFORM_VERSION)/$(BUILD_ID)/$(BF_BUILD_NUMBER):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS) + BUILD_FINGERPRINT := $(PRODUCT_BRAND)/$(TARGET_VENDOR_PRODUCT_NAME)/$(TARGET_VENDOR_DEVICE_NAME):$(PLATFORM_VERSION)/$(BUILD_ID)/$(BUILD_NUMBER):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS) endif ifneq ($(words $(BUILD_FINGERPRINT)),1) $(error BUILD_FINGERPRINT cannot contain spaces: "$(BUILD_FINGERPRINT)") @@ -203,9 +213,9 @@ ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_OEM_PROPERTIES),) endif $(hide) TARGET_BUILD_TYPE="$(TARGET_BUILD_VARIANT)" \ TARGET_BUILD_FLAVOR="$(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)" \ - TARGET_DEVICE="$(TARGET_DEVICE)" \ + TARGET_DEVICE="$(TARGET_VENDOR_DEVICE_NAME)" \ SLIM_DEVICE="$(TARGET_DEVICE)" \ - PRODUCT_NAME="$(TARGET_PRODUCT)" \ + PRODUCT_NAME="$(TARGET_VENDOR_PRODUCT_NAME)" \ PRODUCT_BRAND="$(PRODUCT_BRAND)" \ PRODUCT_DEFAULT_LOCALE="$(call get-default-product-locale,$(PRODUCT_LOCALES))" \ PRODUCT_DEFAULT_WIFI_CHANNELS="$(PRODUCT_DEFAULT_WIFI_CHANNELS)" \ From afe14134622f3343f758d4dab67f0922a4cc16cc Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Mon, 11 Nov 2013 00:51:48 +0000 Subject: [PATCH 068/309] For user builds, use a release build id in the description if one exists Companion change to Ic5913e587400dd38c04aa05de3a7becced77a59e Change-Id: If5edbf56d567701f96f0ba46af9a90ffa8310c24 --- core/Makefile | 8 ++++++-- 1 
file changed, 6 insertions(+), 2 deletions(-) diff --git a/core/Makefile b/core/Makefile index 11f7c4f9b..c8b139f11 100644 --- a/core/Makefile +++ b/core/Makefile @@ -134,8 +134,12 @@ ifeq ($(TARGET_VENDOR_DEVICE_NAME),) TARGET_VENDOR_DEVICE_NAME := $(TARGET_DEVICE) endif +ifeq ($(TARGET_VENDOR_RELEASE_BUILD_ID),) +TARGET_VENDOR_RELEASE_BUILD_ID := $(BUILD_NUMBER) +endif + # A human-readable string that descibes this build in detail. -build_desc := $(TARGET_VENDOR_PRODUCT_NAME)-$(TARGET_BUILD_VARIANT) $(PLATFORM_VERSION) $(BUILD_ID) $(BUILD_NUMBER) $(BUILD_VERSION_TAGS) +build_desc := $(TARGET_VENDOR_PRODUCT_NAME)-$(TARGET_BUILD_VARIANT) $(PLATFORM_VERSION) $(BUILD_ID) $(TARGET_VENDOR_RELEASE_BUILD_ID) $(BUILD_VERSION_TAGS) $(intermediate_system_build_prop): PRIVATE_BUILD_DESC := $(build_desc) # The string used to uniquely identify the combined build and product; used by the OTA server. @@ -147,7 +151,7 @@ ifeq (,$(strip $(BUILD_FINGERPRINT))) else BF_BUILD_NUMBER := $(BUILD_NUMBER) endif - BUILD_FINGERPRINT := $(PRODUCT_BRAND)/$(TARGET_VENDOR_PRODUCT_NAME)/$(TARGET_VENDOR_DEVICE_NAME):$(PLATFORM_VERSION)/$(BUILD_ID)/$(BUILD_NUMBER):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS) + BUILD_FINGERPRINT := $(PRODUCT_BRAND)/$(TARGET_VENDOR_PRODUCT_NAME)/$(TARGET_VENDOR_DEVICE_NAME):$(PLATFORM_VERSION)/$(BUILD_ID)/$(TARGET_VENDOR_RELEASE_BUILD_ID):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS) endif ifneq ($(words $(BUILD_FINGERPRINT)),1) $(error BUILD_FINGERPRINT cannot contain spaces: "$(BUILD_FINGERPRINT)") From 88e6e5f245538f2317afd8fc1ecb8a1bb396b64e Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Mon, 14 May 2012 16:14:36 -0700 Subject: [PATCH 069/309] makerecipe: WIP to generate manifest branches for releases Change-Id: I1804c180844043692f788275c8998307e348001b --- envsetup.sh | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index e94141d16..220be0b5f 100644 --- a/envsetup.sh +++ 
b/envsetup.sh @@ -1569,8 +1569,12 @@ function cmremote() GERRIT_REMOTE=$(cat .git/config | grep git://github.com | awk '{ print $NF }' | sed s#git://github.com/##g) if [ -z "$GERRIT_REMOTE" ] then - echo Unable to set up the git remote, are you in the root of the repo? - return 0 + GERRIT_REMOTE=$(cat .git/config | grep http://github.com | awk '{ print $NF }' | sed s#http://github.com/##g) + if [ -z "$GERRIT_REMOTE" ] + then + echo Unable to set up the git remote, are you in the root of the repo? + return 0 + fi fi CMUSER=`git config --get review.review.cyanogenmod.com.username` if [ -z "$CMUSER" ] @@ -1581,6 +1585,29 @@ function cmremote() fi echo You can now push to "cmremote". } +export -f cmremote + +function makerecipe() { + if [ -z "$1" ] + then + echo "No branch name provided." + return 1 + fi + cd android + sed -i s/'default revision=.*'/'default revision="refs\/heads\/'$1'"'/ default.xml + git commit -a -m "$1" + cd .. + + repo forall -c ' + + if [ "$REPO_REMOTE" == "github" ] + then + pwd + cmremote + git push cmremote HEAD:refs/heads/'$1' + fi + ' +} function cmgerrit() { if [ $# -eq 0 ]; then From 6ca35a7eb8a11aa6575c6220aa9b35acc1c0d19b Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Mon, 17 Sep 2012 11:33:18 -0700 Subject: [PATCH 070/309] build: Add "aospremote" command * Adds the git remote for the matching AOSP repository. Change-Id: Iad03fb95874ff39a39083218aec0d834e82b4480 --- envsetup.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 220be0b5f..35cab8aca 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -25,6 +25,7 @@ Invoke ". 
build/envsetup.sh" from your shell to add the following functions to y - cmremote: Add git remote for CM Gerrit Review - cmgerrit: A Git wrapper that fetches/pushes patch from/to CM Gerrit Review - cmrebase: Rebase a Gerrit change and push it again +- aospremote: Add git remote for matching AOSP repository - mka: Builds using SCHED_BATCH on all processors - reposync: Parallel repo sync using ionice and SCHED_BATCH - installboot: Installs a boot.img to the connected device. @@ -1587,6 +1588,23 @@ function cmremote() } export -f cmremote +function aospremote() +{ + git remote rm aosp 2> /dev/null + if [ ! -d .git ] + then + echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up. + fi + PROJECT=`pwd | sed s#$ANDROID_BUILD_TOP/##g` + if (echo $PROJECT | grep -qv "^device") + then + PFX="platform/" + fi + git remote add aosp https://android.googlesource.com/$PFX$PROJECT + echo "Remote 'aosp' created" +} +export -f aospremote + function makerecipe() { if [ -z "$1" ] then From 06ef0502b006c27ac5dae5c4a2b2e3d55ec5da20 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sun, 27 Oct 2013 13:34:36 -0700 Subject: [PATCH 071/309] build: Add cafremote command to envsetup * So lazy! Change-Id: I99a988180abb4fb486d4ebbb842bb9eda03bf1bc --- envsetup.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 35cab8aca..02544731c 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -26,6 +26,7 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - cmgerrit: A Git wrapper that fetches/pushes patch from/to CM Gerrit Review - cmrebase: Rebase a Gerrit change and push it again - aospremote: Add git remote for matching AOSP repository +- cafremote: Add git remote for matching CodeAurora repository. 
- mka: Builds using SCHED_BATCH on all processors - reposync: Parallel repo sync using ionice and SCHED_BATCH - installboot: Installs a boot.img to the connected device. @@ -1946,6 +1947,24 @@ function fixup_common_out_dir() { fi } +function cafremote() +{ + git remote rm caf 2> /dev/null + if [ ! -d .git ] + then + echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up. + fi + PROJECT=`pwd | sed s#$ANDROID_BUILD_TOP/##g` + if (echo $PROJECT | grep -qv "^device") + then + PFX="platform/" + fi + git remote add caf git://codeaurora.org/$PFX$PROJECT + echo "Remote 'caf' created" +} +export -f cafremote + + function installboot() { if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ]; From 737b0a9ed1dc35f955c1bcb1db73230134037cc5 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sat, 16 Nov 2013 03:48:30 -0800 Subject: [PATCH 072/309] envsetup: Fix remote functions to deal with symlinks Change-Id: I6f35dc30dda22149b2c37e1c6c7d41ea7239d3bb --- envsetup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 02544731c..0b0f48ed6 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1596,7 +1596,7 @@ function aospremote() then echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up. fi - PROJECT=`pwd | sed s#$ANDROID_BUILD_TOP/##g` + PROJECT=`pwd -P | sed s#$ANDROID_BUILD_TOP/##g` if (echo $PROJECT | grep -qv "^device") then PFX="platform/" @@ -1954,7 +1954,7 @@ function cafremote() then echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up. 
fi - PROJECT=`pwd | sed s#$ANDROID_BUILD_TOP/##g` + PROJECT=`pwd -P | sed s#$ANDROID_BUILD_TOP/##g` if (echo $PROJECT | grep -qv "^device") then PFX="platform/" From 1d3615fc23e82ecae04e91b5a714b99005bf5aee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emilio=20L=C3=B3pez?= Date: Sun, 3 Nov 2013 13:04:27 -0300 Subject: [PATCH 073/309] envsetup: drop unnecesary exports export -f is a bashism and we don't actually need it, so lets remove it. Change-Id: I9228cef0c4a9543b3d34fdc39a368ad0238431e2 Conflicts: envsetup.sh --- envsetup.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 0b0f48ed6..711a57025 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1587,7 +1587,6 @@ function cmremote() fi echo You can now push to "cmremote". } -export -f cmremote function aospremote() { @@ -1604,7 +1603,6 @@ function aospremote() git remote add aosp https://android.googlesource.com/$PFX$PROJECT echo "Remote 'aosp' created" } -export -f aospremote function makerecipe() { if [ -z "$1" ] @@ -1962,7 +1960,6 @@ function cafremote() git remote add caf git://codeaurora.org/$PFX$PROJECT echo "Remote 'caf' created" } -export -f cafremote function installboot() From 2fac263eb468e875d04ff554dc8002443f8039f0 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sat, 28 Dec 2013 15:48:20 -0800 Subject: [PATCH 074/309] build: Add QCOM target-specific config * Needed to support global DirectTract config on legacy targets * Let's also eliminate some boilerplate Change-Id: I736c10a5e7e1f3d1e0de9e60f29b60add276f151 Conflicts: core/config.mk --- core/config.mk | 3 +++ core/qcom_target.mk | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 core/qcom_target.mk diff --git a/core/config.mk b/core/config.mk index dc2b7e059..4e8024c5b 100644 --- a/core/config.mk +++ b/core/config.mk @@ -687,6 +687,9 @@ endif RSCOMPAT_32BIT_ONLY_API_LEVELS := 8 9 10 11 12 13 14 15 16 17 18 19 20 RSCOMPAT_NO_USAGEIO_API_LEVELS := 8 9 10 11 12 13 +# Rules for QCOM targets 
+include $(BUILD_SYSTEM)/qcom_target.mk + ifneq ($(SLIM_BUILD),) ## We need to be sure the global selinux policies are included ## last, to avoid accidental resetting by device configs diff --git a/core/qcom_target.mk b/core/qcom_target.mk new file mode 100644 index 000000000..b993e8f76 --- /dev/null +++ b/core/qcom_target.mk @@ -0,0 +1,19 @@ +# Target-specific configuration + +# Enable DirectTrack on QCOM legacy boards +ifeq ($(BOARD_USES_QCOM_HARDWARE),true) + + TARGET_GLOBAL_CFLAGS += -DQCOM_HARDWARE + + ifeq ($(TARGET_USES_QCOM_BSP),true) + TARGET_GLOBAL_CFLAGS += -DQCOM_BSP + endif + + # Enable DirectTrack for legacy targets + ifneq ($(filter caf bfam,$(TARGET_QCOM_AUDIO_VARIANT)),) + ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) + TARGET_GLOBAL_CFLAGS += -DQCOM_DIRECTTRACK + endif + endif +endif + From e5c92dd1d7fb82c9225bd636dba44db9e506794f Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Thu, 19 Dec 2013 01:00:32 -0600 Subject: [PATCH 075/309] qcom_utils: Update list of QCOM_BOARD_PLATFORMS Add msm8226 (Moto G) and other upcoming platforms Change-Id: Icf895cbcf86791ca800636a1c0893b3a905a27a0 --- core/qcom_utils.mk | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/core/qcom_utils.mk b/core/qcom_utils.mk index 71ba3ad40..279f06f2d 100755 --- a/core/qcom_utils.mk +++ b/core/qcom_utils.mk @@ -1,11 +1,15 @@ # Board platforms lists to be used for # TARGET_BOARD_PLATFORM specific featurization -QCOM_BOARD_PLATFORMS := msm7x27 -QCOM_BOARD_PLATFORMS += msm7x27a QCOM_BOARD_PLATFORMS += msm7x30 +QCOM_BOARD_PLATFORMS += msm8226 +QCOM_BOARD_PLATFORMS += msm8610 QCOM_BOARD_PLATFORMS += msm8660 +QCOM_BOARD_PLATFORMS += msm8916 QCOM_BOARD_PLATFORMS += msm8960 QCOM_BOARD_PLATFORMS += msm8974 +QCOM_BOARD_PLATFORMS += mpq8092 +QCOM_BOARD_PLATFORMS += msm_bronze +QCOM_BOARD_PLATFORMS += apq8084 MSM7K_BOARD_PLATFORMS := msm7x30 MSM7K_BOARD_PLATFORMS += msm7x27 From 994fe17921c344789f43b21f3c7c402d098680a7 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: 
Mon, 30 Dec 2013 11:15:18 -0800 Subject: [PATCH 076/309] build: Fix cflags for QC targets Change-Id: I2281bec3afb4d80e80845718d880dc24ef7baf32 --- core/qcom_target.mk | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index b993e8f76..e65a6fc0c 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -4,16 +4,18 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) TARGET_GLOBAL_CFLAGS += -DQCOM_HARDWARE + TARGET_GLOBAL_CPPFLAGS += -DQCOM_HARDWARE ifeq ($(TARGET_USES_QCOM_BSP),true) TARGET_GLOBAL_CFLAGS += -DQCOM_BSP + TARGET_GLOBAL_CPPFLAGS += -DQCOM_BSP endif # Enable DirectTrack for legacy targets ifneq ($(filter caf bfam,$(TARGET_QCOM_AUDIO_VARIANT)),) ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) TARGET_GLOBAL_CFLAGS += -DQCOM_DIRECTTRACK + TARGET_GLOBAL_CPPFLAGS += -DQCOM_DIRECTTRACK endif endif endif - From 6db5f0aeb1f0de2118df9088ec98ab03e615689f Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Fri, 3 Jan 2014 02:46:15 +0000 Subject: [PATCH 077/309] Allow individual projects to enforce a property's value Some projects require system properties to be set to a specific value (for example, a shared library needing a property pointing to its own path) in order to work correctly, but some device configurations are mistakenly setting those properties with the wrong value (usually inherited from the original OEM build). "PRODUCT_PROPERTY_UBER_OVERRIDES += property=value" can (and should) be used in that project's makefile to ensure the value is the correct one. 
This variable is intended for software projects, and should never be used in product makefiles (BoardConfig, cm.mk, AndroidProduct) Change-Id: I1986e7c444e51cce8b198e43fdc793fad16d6276 --- core/Makefile | 2 +- tools/post_process_props.py | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/core/Makefile b/core/Makefile index c8b139f11..280129681 100644 --- a/core/Makefile +++ b/core/Makefile @@ -265,7 +265,7 @@ endif $(hide) $(foreach line,$(ADDITIONAL_BUILD_PROPERTIES), \ echo "$(line)" >> $@;) $(hide) cat $(INSTALLED_ANDROID_INFO_TXT_TARGET) | grep 'require version-' | sed -e 's/require version-/ro.build.expect./g' >> $@ - $(hide) build/tools/post_process_props.py $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST) + $(hide) build/tools/post_process_props.py $@ "$(PRODUCT_PROPERTY_UBER_OVERRIDES)" $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST) build_desc := diff --git a/tools/post_process_props.py b/tools/post_process_props.py index fa6106f0d..cbbf1f1b5 100755 --- a/tools/post_process_props.py +++ b/tools/post_process_props.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys +import os, sys # Usage: post_process_props.py file.prop [blacklist_key, ...] # Blacklisted keys are removed from the property file, if present @@ -27,7 +27,14 @@ # Put the modifications that you need to make into the /system/build.prop into this # function. The prop object has get(name) and put(name,value) methods. 
-def mangle_build_prop(prop): +def mangle_build_prop(prop, overrides): + if len(overrides) == 0: + return + overridelist = overrides.replace(" ",",").split(",") + for proppair in overridelist: + values = proppair.split("=") + prop.put(values[0], values[1]) + pass # Put the modifications that you need to make into the /default.prop into this @@ -110,6 +117,10 @@ def write(self, f): def main(argv): filename = argv[1] + if (len(argv) > 2): + extraargs = argv[2] + else: + extraargs = "" f = open(filename) lines = f.readlines() f.close() @@ -117,7 +128,7 @@ def main(argv): properties = PropFile(lines) if filename.endswith("/build.prop"): - mangle_build_prop(properties) + mangle_build_prop(properties, extraargs) elif filename.endswith("/default.prop"): mangle_default_prop(properties) else: @@ -128,7 +139,7 @@ def main(argv): sys.exit(1) # Drop any blacklisted keys - for key in argv[2:]: + for key in argv[3:]: properties.delete(key) f = open(filename, 'w+') From 1705f58adab44f770cc5f64b6487d7199144ce39 Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Wed, 22 Jan 2014 15:51:46 -0800 Subject: [PATCH 078/309] Store the value from TARGET_OTA_ASSERT_DEVICE to a file This is needed so it can be read in when re-signing a package to allow for the proper device asserts. 
Change-Id: I8f3bb491bbbaa4d2b827a45ef83f12c6c83e6712 --- core/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/core/Makefile b/core/Makefile index 280129681..a427c0b0e 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1693,6 +1693,7 @@ endif $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) @echo "$(OTA_FROM_TARGET_SCRIPT)" > $(PRODUCT_OUT)/ota_script_path + @echo "$(override_device)" > $(PRODUCT_OUT)/ota_override_device @echo -e ${CL_YLW}"Package OTA:"${CL_RST}" $@" $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ $(OTA_FROM_TARGET_SCRIPT) -v \ From 73cb2f3bcfd2c7517d07fdc2322178b53e95c7c1 Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Wed, 9 Apr 2014 16:14:07 -0700 Subject: [PATCH 079/309] build: edify: use set_metadata for backuptool Also use saner permissions on backuptool.sh Change-Id: I50742b51867aa358f5924b8dc208833092a35bd9 --- tools/releasetools/edify_generator.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 231200aa1..e4f70960b 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -136,8 +136,12 @@ def AssertSomeBootloader(self, *bootloaders): def RunBackup(self, command): self.script.append('package_extract_file("system/bin/backuptool.sh", "/tmp/backuptool.sh");') self.script.append('package_extract_file("system/bin/backuptool.functions", "/tmp/backuptool.functions");') - self.script.append('set_perm(0, 0, 0777, "/tmp/backuptool.sh");') - self.script.append('set_perm(0, 0, 0644, "/tmp/backuptool.functions");') + if not self.info.get("use_set_metadata", False): + self.script.append('set_perm(0, 0, 0755, "/tmp/backuptool.sh");') + self.script.append('set_perm(0, 0, 0644, "/tmp/backuptool.functions");') + else: + self.script.append('set_metadata("/tmp/backuptool.sh", "uid", 0, "gid", 0, "mode", 0755);') + 
self.script.append('set_metadata("/tmp/backuptool.functions", "uid", 0, "gid", 0, "mode", 0644);') self.script.append(('run_program("/tmp/backuptool.sh", "%s");' % command)) if command == "restore": self.script.append('delete("/system/bin/backuptool.sh");') From 03886ef8170236fb642a672e3f73919bc5028bf5 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Tue, 1 Oct 2013 20:42:45 +0100 Subject: [PATCH 080/309] tools: getb64key - print the base64 version of a PEM public key file Change-Id: Ia94ff560c88dfe85c3fa55db5c8219aa0d3419ee --- tools/getb64key.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100755 tools/getb64key.py diff --git a/tools/getb64key.py b/tools/getb64key.py new file mode 100755 index 000000000..0bb63e1a8 --- /dev/null +++ b/tools/getb64key.py @@ -0,0 +1,17 @@ +#!/usr/bin/python + +import base64 +import sys +import os + +pkFile = open(sys.argv[1], 'rb').readlines() +base64Key = "" +inCert = False +for line in pkFile: + if line.startswith("-"): + inCert = not inCert + continue + + base64Key += line.strip() + +print base64.b16encode(base64.b64decode(base64Key)).lower() From bf5fead51fac8014e7b04f35fa67fafd2934a5e8 Mon Sep 17 00:00:00 2001 From: Andrew Bartholomew Date: Sat, 10 May 2014 10:58:07 -0400 Subject: [PATCH 081/309] build: use /usr/bin/env to call python Hardcoding the path to python instead of env is not recommended. This can break the build in some Linux environments that link /usr/bin/python to python3. 
Change-Id: I4c62eb391acc6d468ce24ac8640834e965e76e78 --- core/checktree | 2 +- tools/getb64key.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/checktree b/core/checktree index b0b9cfab6..287268328 100755 --- a/core/checktree +++ b/core/checktree @@ -1,4 +1,4 @@ -#!/usr/bin/python -E +#!/usr/bin/env python -E import sys, os, re diff --git a/tools/getb64key.py b/tools/getb64key.py index 0bb63e1a8..a0cd1c3ec 100755 --- a/tools/getb64key.py +++ b/tools/getb64key.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python import base64 import sys From 27576d0d91e3878c5733b5e804a4ed77ffa966fd Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Fri, 16 May 2014 13:46:24 -0700 Subject: [PATCH 082/309] build: Set ccache base to build top This enables ccache sharing between different build tree paths. Change-Id: I99636bdd2779a6ccf0649f027e35cf67df44b6ae --- core/combo/select.mk | 54 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/core/combo/select.mk b/core/combo/select.mk index df12e7e38..967a3b2e9 100644 --- a/core/combo/select.mk +++ b/core/combo/select.mk @@ -47,3 +47,57 @@ $(combo_var_prefix)STATIC_LIB_SUFFIX := .a # Now include the combo for this specific target. include $(BUILD_COMBOS)/$(combo_target)$(combo_os_arch).mk + +ifneq ($(USE_CCACHE),) + # The default check uses size and modification time, causing false misses + # since the mtime depends when the repo was checked out + export CCACHE_COMPILERCHECK := content + + # See man page, optimizations to get more cache hits + # implies that __DATE__ and __TIME__ are not critical for functionality. + # Ignore include file modification time since it will depend on when + # the repo was checked out + export CCACHE_SLOPPINESS := time_macros,include_file_mtime,file_macro + + # Turn all preprocessor absolute paths into relative paths. + # Fixes absolute paths in preprocessed source due to use of -g. 
+ # We don't really use system headers much so the rootdir is + # fine; ensures these paths are relative for all Android trees + # on a workstation. + export CCACHE_BASEDIR := $(ANDROID_BUILD_TOP) + + # Workaround for ccache with clang. + # See http://petereisentraut.blogspot.com/2011/09/ccache-and-clang-part-2.html + export CCACHE_CPP2 := true + + CCACHE_HOST_TAG := $(HOST_PREBUILT_TAG) + # If we are cross-compiling Windows binaries on Linux + # then use the linux ccache binary instead. + ifeq ($(HOST_OS)-$(BUILD_OS),windows-linux) + CCACHE_HOST_TAG := linux-$(HOST_PREBUILT_ARCH) + endif + ccache := prebuilts/misc/$(CCACHE_HOST_TAG)/ccache/ccache + # Check that the executable is here. + ccache := $(strip $(wildcard $(ccache))) + ifdef ccache + ifndef CC_WRAPPER + CC_WRAPPER := $(ccache) + endif + ifndef CXX_WRAPPER + CXX_WRAPPER := $(ccache) + endif + ccache = + endif +endif + +# The C/C++ compiler can be wrapped by setting the CC/CXX_WRAPPER vars. +ifdef CC_WRAPPER + ifneq ($(CC_WRAPPER),$(firstword $($(combo_var_prefix)CC))) + $(combo_var_prefix)CC := $(CC_WRAPPER) $($(combo_var_prefix)CC) + endif +endif +ifdef CXX_WRAPPER + ifneq ($(CXX_WRAPPER),$(firstword $($(combo_var_prefix)CXX))) + $(combo_var_prefix)CXX := $(CXX_WRAPPER) $($(combo_var_prefix)CXX) + endif +endif From 36171bb35483927cdbe3823ff1e5abc0867c6f26 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sun, 18 May 2014 15:34:30 -0700 Subject: [PATCH 083/309] build: Include an extra device.mk Change-Id: I4373f78f57c81e46affc2f6a6bc854bf4c3d00db --- core/main.mk | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/core/main.mk b/core/main.mk index f67362e1d..885cd5611 100644 --- a/core/main.mk +++ b/core/main.mk @@ -102,10 +102,17 @@ include $(BUILD_SYSTEM)/config.mk # be generated correctly include $(BUILD_SYSTEM)/cleanbuild.mk +# Bring in Qualcomm helper macros +include $(BUILD_SYSTEM)/qcom_utils.mk + # Include the google-specific config -include vendor/google/build/config.mk +# 
Include the extra device config +-include vendor/extra/device.mk + VERSION_CHECK_SEQUENCE_NUMBER := 5 + -include $(OUT_DIR)/versions_checked.mk ifneq ($(VERSION_CHECK_SEQUENCE_NUMBER),$(VERSIONS_CHECKED)) @@ -244,9 +251,6 @@ endif # Bring in standard build system definitions. include $(BUILD_SYSTEM)/definitions.mk -# Bring in Qualcomm helper macros -include $(BUILD_SYSTEM)/qcom_utils.mk - # Bring in dex_preopt.mk include $(BUILD_SYSTEM)/dex_preopt.mk From b401f77623ad467940128400a7342ea3d6d34f06 Mon Sep 17 00:00:00 2001 From: Mike Grissom Date: Mon, 26 Aug 2013 16:09:51 -0700 Subject: [PATCH 084/309] Don't set CCACHE_BASEDIR if its already set * enable use of shared ccache Change-Id: Ic708a5d5169291d674b167aa2c87bf2c8adaafae --- core/combo/select.mk | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/combo/select.mk b/core/combo/select.mk index 967a3b2e9..5842705ff 100644 --- a/core/combo/select.mk +++ b/core/combo/select.mk @@ -64,7 +64,9 @@ ifneq ($(USE_CCACHE),) # We don't really use system headers much so the rootdir is # fine; ensures these paths are relative for all Android trees # on a workstation. - export CCACHE_BASEDIR := $(ANDROID_BUILD_TOP) + ifeq ($(CCACHE_BASEDIR),) + export CCACHE_BASEDIR := $(ANDROID_BUILD_TOP) + endif # Workaround for ccache with clang. # See http://petereisentraut.blogspot.com/2011/09/ccache-and-clang-part-2.html From bec9114ec0fde71415809e01167aa569c97eade7 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Fri, 9 May 2014 22:21:28 +0100 Subject: [PATCH 085/309] ota: Make sure we don't install on top of an incompatible system If the system has data present, make sure said data doesn't depend on a system signature different from our own. 
If we can't find our platform key, abort the installation Change-Id: I16572daf9464de326a9d0d126597cfbf03208f94 --- tools/releasetools/edify_generator.py | 11 +++++++++++ tools/releasetools/ota_from_target_files | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index e4f70960b..75e5004a2 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -147,6 +147,17 @@ def RunBackup(self, command): self.script.append('delete("/system/bin/backuptool.sh");') self.script.append('delete("/system/bin/backuptool.functions");') + def ValidateSignatures(self, command): + if command == "cleanup": + self.script.append('delete("/system/bin/otasigcheck.sh");') + else: + self.script.append('package_extract_file("system/bin/otasigcheck.sh", "/tmp/otasigcheck.sh");') + self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");') + self.script.append('set_metadata("/tmp/otasigcheck.sh", "uid", 0, "gid", 0, "mode", 0755);') + self.script.append('run_program("/tmp/otasigcheck.sh");') + ## Hax: a failure from run_program doesn't trigger an abort, so have it change the key value and check for "INVALID" + self.script.append('sha1_check(read_file("/tmp/releasekey"),"7241e92725436afc79389d4fc2333a2aa8c20230") && abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");') + def ShowProgress(self, frac, dur): """Update the progress bar, advancing it over 'frac' over the next 'dur' seconds. 
'dur' may be zero to advance it via SetProgress diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 676369ace..fb3fc7000 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -592,6 +592,10 @@ else if get_stage("%(bcb_dev)s") == "3/3" then device_specific.FullOTA_InstallBegin() + script.Mount("/data") + script.ValidateSignatures("data") + script.Unmount("/data") + if OPTIONS.backuptool: script.Mount("/system") script.RunBackup("backup") @@ -668,6 +672,8 @@ else if get_stage("%(bcb_dev)s") == "3/3" then common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict) common.ZipWriteStr(output_zip, "boot.img", boot_img.data) + script.ValidateSignatures("cleanup") + if OPTIONS.backuptool: script.ShowProgress(0.02, 10) script.RunBackup("restore") From 6a9338769a3a54955ab078cf1ffc893a11f3f612 Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Thu, 10 Jul 2014 10:38:59 -0700 Subject: [PATCH 086/309] build: Store the jenkins build number in misc_info If the jenkins build number is available, list it as build_number in the misc_info.txt file. 
Change-Id: I6500c6b53661f2904dff738d25d8a8d24a7dca80 --- core/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/Makefile b/core/Makefile index a427c0b0e..87f8afdc0 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1628,6 +1628,9 @@ endif ifneq ($(OEM_THUMBPRINT_PROPERTIES),) # OTA scripts are only interested in fingerprint related properties $(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt +endif +ifdef BUILD_NO + $(hide) echo "build_number=$(BUILD_NO)" >> $(zip_root)/META/misc_info.txt endif $(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt) $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ From 7f69b960deb3fafab1f2f6b05072814026b73474 Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Tue, 20 May 2014 02:52:23 -0500 Subject: [PATCH 087/309] envsetup: fixup dopush * After clobber, $OUT does not exist and tee cannot create .log; fix this by calling mkdir -p $OUT * Make sure Copy and Install are at the beginning of the log line being analyzed and also search for ':' so that "Copying:" lines don't count * Fix Copy file list by quoting so shell doesn't think this is a command: $LOC $(...) 
* Fix 'only copy files from $OUT' now that multiple file pushing works right * Only stop java services once (if needed) and then wait to restart services until after all files have been pushed * Change location of SystemUI.apk to priv-app Signed-off-by: Chirayu Desai Change-Id: I65edd34bf445b28c2638cb3e9621719121fb962f --- envsetup.sh | 46 +++++++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 711a57025..fd84e802c 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -2079,37 +2079,45 @@ function dopush() sleep 0.3 adb remount &> /dev/null + mkdir -p $OUT $func $* | tee $OUT/.log # Install: - LOC=$(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep 'Install' | cut -d ':' -f 2) + LOC="$(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep '^Install: ' | cut -d ':' -f 2)" # Copy: - LOC=$LOC $(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep 'Copy' | cut -d ':' -f 2) + LOC="$LOC $(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep '^Copy: ' | cut -d ':' -f 2)" + stop_n_start=false for FILE in $LOC; do - # Get target file name (i.e. system/bin/adb) - TARGET=$(echo $FILE | sed "s#$OUT/##") + # Make sure file is in $OUT/system + case $FILE in + $OUT/system/*) + # Get target file name (i.e. /system/bin/adb) + TARGET=$(echo $FILE | sed "s#$OUT##") + ;; + *) continue ;; + esac - # Don't send files that are not under /system or /data - if [ ! "echo $TARGET | egrep '^system\/' > /dev/null" -o \ - "echo $TARGET | egrep '^data\/' > /dev/null" ] ; then - continue - else - case $TARGET in - system/app/SystemUI.apk|system/framework/*) - stop_n_start=true + case $TARGET in + /system/priv-app/SystemUI.apk|/system/framework/*) + # Only need to stop services once + if ! 
$stop_n_start; then + adb shell stop + stop_n_start=true + fi + echo "Pushing: $TARGET" + adb push $FILE $TARGET ;; *) - stop_n_start=false + echo "Pushing: $TARGET" + adb push $FILE $TARGET ;; - esac - if $stop_n_start ; then adb shell stop ; fi - echo "Pushing: $TARGET" - adb push $FILE $TARGET - if $stop_n_start ; then adb shell start ; fi - fi + esac done + if $stop_n_start; then + adb shell start + fi rm -f $OUT/.log return 0 } From e827d652fc30b93a7c375833e2a05401701795b5 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Wed, 5 Nov 2014 17:08:57 +0000 Subject: [PATCH 088/309] qcom_utils: Make "is-vendor-board-platform,XX" depend on BOARD_HAS_XX_HARDWARE Prevent qc's code from wrongly kicking in for AOSP-derived configurations --- core/qcom_utils.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/qcom_utils.mk b/core/qcom_utils.mk index 279f06f2d..1177ad438 100755 --- a/core/qcom_utils.mk +++ b/core/qcom_utils.mk @@ -99,7 +99,7 @@ endef # $(call get-vendor-board-platforms,v) # returns list of board platforms for vendor v define get-vendor-board-platforms -$($(1)_BOARD_PLATFORMS) +$(if $(call match-word,$(BOARD_USES_$(1)_HARDWARE),true),$($(1)_BOARD_PLATFORMS)) endef # $(call is-board-platform,bp) From 9e912f0900e5d521e93ff482846119a440d8637a Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Wed, 5 Nov 2014 18:16:43 +0000 Subject: [PATCH 089/309] ota: Include copies of the recovery scripts even if shipping in block mode --- tools/releasetools/ota_from_target_files | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index fb3fc7000..562041832 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -592,11 +592,17 @@ else if get_stage("%(bcb_dev)s") == "3/3" then device_specific.FullOTA_InstallBegin() + if block_based: + common.ZipWriteStr(output_zip, "system/bin/otasigcheck.sh", + 
""+input_zip.read("SYSTEM/bin/otasigcheck.sh")) script.Mount("/data") script.ValidateSignatures("data") script.Unmount("/data") if OPTIONS.backuptool: + if block_based: + common.ZipWriteStr(output_zip, "system/bin/backuptool.sh", + ""+input_zip.read("SYSTEM/bin/backuptool.sh")) script.Mount("/system") script.RunBackup("backup") script.Unmount("/system") From 1a0a9f8438fa18e2fa1fd59ee539195fccd04073 Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Thu, 28 Aug 2014 16:51:02 -0500 Subject: [PATCH 090/309] Edify: Add AssertSomeBaseband Allow releasetools.py to assert a baseband version. Works just like AssertSomeBootloader. Change-Id: Ic8eb341cef1d777d983be25ba21a3bc545819c29 --- tools/releasetools/edify_generator.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 75e5004a2..8822d3d31 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -133,6 +133,14 @@ def AssertSomeBootloader(self, *bootloaders): ");") self.script.append(self.WordWrap(cmd)) + def AssertSomeBaseband(self, *basebands): + """Assert that the baseband version is one of *basebands.""" + cmd = ("assert(" + + " ||\0".join(['getprop("ro.baseband") == "%s"' % (b,) + for b in basebands]) + + ");") + self.script.append(self._WordWrap(cmd)) + def RunBackup(self, command): self.script.append('package_extract_file("system/bin/backuptool.sh", "/tmp/backuptool.sh");') self.script.append('package_extract_file("system/bin/backuptool.functions", "/tmp/backuptool.functions");') From a856423f2ca83c93f21e8899716671f67c367dc1 Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Mon, 18 Aug 2014 14:28:42 -0700 Subject: [PATCH 091/309] ota_from_target_files: Validate signatures after possible data wipe If the data is being wiped, we want to validate the signature after this step. This is also a good check that the data wipe succeeded. 
Change-Id: Iaecfddfb2010944de0c97e7bb21d8e581f20deec --- tools/releasetools/ota_from_target_files | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 562041832..0003d26ff 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -592,13 +592,6 @@ else if get_stage("%(bcb_dev)s") == "3/3" then device_specific.FullOTA_InstallBegin() - if block_based: - common.ZipWriteStr(output_zip, "system/bin/otasigcheck.sh", - ""+input_zip.read("SYSTEM/bin/otasigcheck.sh")) - script.Mount("/data") - script.ValidateSignatures("data") - script.Unmount("/data") - if OPTIONS.backuptool: if block_based: common.ZipWriteStr(output_zip, "system/bin/backuptool.sh", @@ -614,6 +607,13 @@ else if get_stage("%(bcb_dev)s") == "3/3" then if HasVendorPartition(input_zip): system_progress -= 0.1 + if block_based: + common.ZipWriteStr(output_zip, "system/bin/otasigcheck.sh", + ""+input_zip.read("SYSTEM/bin/otasigcheck.sh")) + script.Mount("/data") + script.ValidateSignatures("data") + script.Unmount("/data") + if "selinux_fc" in OPTIONS.info_dict: WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip) From a717574d0069a8a5c36e8752d1a5c25f882425dd Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Wed, 5 Nov 2014 19:58:59 +0000 Subject: [PATCH 092/309] ota: Fix recovery-script cleanups in block mode Change-Id: Iadb480f8062cfb7a0e6c9024e32ac0d71a7481d7 --- tools/releasetools/ota_from_target_files | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 0003d26ff..cdc211feb 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -596,6 +596,8 @@ else if get_stage("%(bcb_dev)s") == "3/3" then if block_based: common.ZipWriteStr(output_zip, "system/bin/backuptool.sh", 
""+input_zip.read("SYSTEM/bin/backuptool.sh")) + common.ZipWriteStr(output_zip, "system/bin/backuptool.functions", + ""+input_zip.read("SYSTEM/bin/backuptool.functions")) script.Mount("/system") script.RunBackup("backup") script.Unmount("/system") @@ -678,11 +680,19 @@ else if get_stage("%(bcb_dev)s") == "3/3" then common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict) common.ZipWriteStr(output_zip, "boot.img", boot_img.data) + if block_based: + script.Mount("/system") script.ValidateSignatures("cleanup") + if block_based: + script.Unmount("/system") if OPTIONS.backuptool: script.ShowProgress(0.02, 10) + if block_based: + script.Mount("/system") script.RunBackup("restore") + if block_based: + script.Unmount("/system") script.ShowProgress(0.05, 5) script.WriteRawImage("/boot", "boot.img") From 751188993fbe6035e227bb7ec9f69c1de21b12ee Mon Sep 17 00:00:00 2001 From: JustArchi Date: Sun, 22 Jun 2014 14:37:30 +0200 Subject: [PATCH 093/309] Build: Add support for specifying build variant in brunch/breakfast This little modification allows specifying build variant in brunch/ breakfast commands. For example we can use "brunch i9300 user" to build user variant instead of default userdebug. When no extra argument is given, userdebug is default. 
Change-Id: I935327252098ee74b34a815a023d2ac6b9a53a30 Conflicts: envsetup.sh --- envsetup.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index fd84e802c..32f300155 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -558,6 +558,7 @@ function omnom function breakfast() { target=$1 + local variant=$2 SLIM_DEVICES_ONLY="true" unset LUNCH_MENU_CHOICES add_lunch_combo full-eng @@ -577,8 +578,11 @@ function breakfast() # A buildtype was specified, assume a full device name lunch $target else - # This is probably just the CM model name - lunch slim_$target-userdebug + # This is probably just the SLIM model name + if [ -z "$variant" ]; then + variant="userdebug" + fi + lunch slim_$target-$variant fi fi return $? From ac63643f6812177bb9f39ea3ac9e605b1ca29c8b Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Wed, 10 Sep 2014 15:17:16 -0700 Subject: [PATCH 094/309] build: Clean up use of vendor/extra * extra_config.mk is not needed anymore * Include BoardConfigExtra.mk if available Change-Id: Icd0db0852ceee5cb5c701c9a40b829e1f3ebd69e --- core/config.mk | 1 + core/dumpvar.mk | 4 ++++ core/main.mk | 3 --- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/core/config.mk b/core/config.mk index 4e8024c5b..fe0c41620 100644 --- a/core/config.mk +++ b/core/config.mk @@ -161,6 +161,7 @@ include $(BUILD_SYSTEM)/envsetup.mk # See envsetup.mk for a description of SCAN_EXCLUDE_DIRS FIND_LEAVES_EXCLUDES := $(addprefix --prune=, $(OUT_DIR) $(SCAN_EXCLUDE_DIRS) .repo .git) +-include vendor/extra/BoardConfigExtra.mk # The build system exposes several variables for where to find the kernel # headers: # TARGET_DEVICE_KERNEL_HEADERS is automatically created for the current diff --git a/core/dumpvar.mk b/core/dumpvar.mk index b1810c595..7aa4afe24 100644 --- a/core/dumpvar.mk +++ b/core/dumpvar.mk @@ -84,5 +84,9 @@ $(info HOST_OS_EXTRA=$(HOST_OS_EXTRA)) $(info HOST_BUILD_TYPE=$(HOST_BUILD_TYPE)) $(info BUILD_ID=$(BUILD_ID)) $(info 
OUT_DIR=$(OUT_DIR)) +ifeq ($(CYNGN_TARGET),true) +$(info CYNGN_TARGET=$(CYNGN_TARGET)) +$(info CYNGN_FEATURES=$(CYNGN_FEATURES)) +endif $(info ============================================) endif diff --git a/core/main.mk b/core/main.mk index 885cd5611..9e51541d5 100644 --- a/core/main.mk +++ b/core/main.mk @@ -108,9 +108,6 @@ include $(BUILD_SYSTEM)/qcom_utils.mk # Include the google-specific config -include vendor/google/build/config.mk -# Include the extra device config --include vendor/extra/device.mk - VERSION_CHECK_SEQUENCE_NUMBER := 5 -include $(OUT_DIR)/versions_checked.mk From 0021dd3df8ded9b3146b7cded2816d5bfed8ed60 Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Thu, 18 Sep 2014 09:45:33 -0700 Subject: [PATCH 095/309] build: qcom: Set QCOM_*_PATH variables for hardware/qcom-* projects This consolidates a bunch of one-off logic scattered throughout the code base. Usage in Android.mk files is trivial: At top level, use e.g. "ifeq ($(call my-dir),$(QCOM_AUDIO_PATH))". This works for all variants, including non-variants (i.e. AOSP). Within subdirs, use e.g. hardware/qcom/audio => $(QCOM_AUDIO_PATH) Change-Id: Iee2497ea9a7efeb4ae9e861b84c532b19da7b69d build: Introduce project pathmap and use it for qcom variants The project pathmap tracks the path to the top level of a project. The following functions are provided: * project-set-path adds an entry. * project-set-path-variant adds a "variant" entry. * project-path-for retrieves an entry. To use as a guard in Android.mk: ifeq ($(call my-dir),$(call project-path-for,projectname)) To use for include paths in Android.mk: LOCAL_C_INCLUDES += $(call project-path-for,projectname)/... Set project pathmap for qcom project variants. Change-Id: I8dceca72a1ba80fc7b1830c5ab285d444f530457 build: Set QCOM variants for non-QCOM_HARDWARE defined targets * Nexus devices and others typically do not define the QCOM_HARDWARE flag, so the variant path should always default to the AOSP variant. 
* Unconditionally set the variant to the AOSP HAL by default. Change-Id: I714170897128f92718af266366cfcbf3136e8981 --- core/pathmap.mk | 30 ++++++++++++++++++++++++++++++ core/qcom_target.mk | 11 +++++++++++ 2 files changed, 41 insertions(+) diff --git a/core/pathmap.mk b/core/pathmap.mk index b300ff5f6..699eac15e 100644 --- a/core/pathmap.mk +++ b/core/pathmap.mk @@ -63,6 +63,36 @@ define include-path-for $(foreach n,$(1),$(patsubst $(n):%,%,$(filter $(n):%,$(pathmap_INCL)))) endef +# Enter project path into pathmap +# +# $(1): name +# $(2): path +# +define project-set-path +$(eval pathmap_PROJ += $(1):$(2)) +endef + +# Enter variant project path into pathmap +# +# $(1): name +# $(2): variable to check +# $(3): base path +# +define project-set-path-variant + $(call project-set-path,$(1),$(strip \ + $(if $($(2)), \ + $(3)-$($(2)), \ + $(3)))) +endef + +# Returns the path to the requested module's include directory, +# relative to the root of the source tree. +# +# $(1): a list of modules (or other named entities) to find the projects for +define project-path-for +$(foreach n,$(1),$(patsubst $(n):%,%,$(filter $(n):%,$(pathmap_PROJ)))) +endef + # # Many modules expect to be able to say "#include ", # so make it easy for them to find the correct path. diff --git a/core/qcom_target.mk b/core/qcom_target.mk index e65a6fc0c..ea91257a0 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -19,3 +19,14 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) endif endif endif + +# Populate the qcom hardware variants in the project pathmap. 
+define qcom-set-path-variant +$(call project-set-path-variant,qcom-$(2),TARGET_QCOM_$(1)_VARIANT,hardware/qcom/$(2)) +endef +$(call qcom-set-path-variant,AUDIO,audio) +$(call qcom-set-path-variant,CAMERA,camera) +$(call qcom-set-path-variant,DISPLAY,display) +$(call qcom-set-path-variant,GPS,gps) +$(call qcom-set-path-variant,MEDIA,media) +$(call qcom-set-path-variant,SENSORS,sensors) From 38f6cef2600024c93cce7239aa481131204f465e Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sun, 23 Feb 2014 23:36:22 +0100 Subject: [PATCH 096/309] [HAX] Allow per-target dtbTool * Revert this when fixed Change-Id: I97ecb0448ae7bd5859454be290c5dde6248b2859 build: Default to dtbToolCM * The default dtbTool isn't the correct module name for the module that actually lives in device/qcom/common/dtbtool Change-Id: I80b427e3652b99742573bc4d2829e51645a8822b --- core/generate_extra_images.mk | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/core/generate_extra_images.mk b/core/generate_extra_images.mk index a0aa542e4..8cd18fd0d 100644 --- a/core/generate_extra_images.mk +++ b/core/generate_extra_images.mk @@ -89,7 +89,13 @@ ifeq ($(strip $(BUILD_TINY_ANDROID)),true) include device/qcom/common/dtbtool/Android.mk endif -DTBTOOL := $(HOST_OUT_EXECUTABLES)/dtbTool$(HOST_EXECUTABLE_SUFFIX) +ifeq ($(strip $(TARGET_CUSTOM_DTBTOOL)),) +DTBTOOL_NAME := dtbToolCM +else +DTBTOOL_NAME := $(TARGET_CUSTOM_DTBTOOL) +endif + +DTBTOOL := $(HOST_OUT_EXECUTABLES)/$(DTBTOOL_NAME)$(HOST_EXECUTABLE_SUFFIX) INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img From bede625d66c7af6c178c567f326165573ed58d2e Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Wed, 5 Nov 2014 21:30:23 +0000 Subject: [PATCH 097/309] Redo BOARD_CUSTOM_BOOTIMG_MK support Simplified version of the previous implementation. 
Recovery's ramdisk is spun off from the main recovery target again to allow overriding just the image-generation step Change-Id: Ie49620e5e438222d2b80efeefcde4d136e346b90 --- core/Makefile | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/core/Makefile b/core/Makefile index 87f8afdc0..e229a58fb 100644 --- a/core/Makefile +++ b/core/Makefile @@ -549,7 +549,9 @@ bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER) $(BOOT_SIGNER) /boot $(INSTALLED_BOOTIMAGE_TARGET) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(INSTALLED_BOOTIMAGE_TARGET) $(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE)) -else ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)) # PRODUCT_SUPPORTS_BOOT_SIGNER != true +else ifndef BOARD_CUSTOM_BOOTIMG_MK + + ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY)) # TARGET_BOOTIMAGE_USE_EXT2 != true $(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) $(call pretty,"Target boot image: $@") @@ -579,7 +581,8 @@ bootimage-nodeps: $(MKBOOTIMG) $(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE)) @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} -endif # TARGET_BOOTIMAGE_USE_EXT2 + endif # PRODUCT_SUPPORTS_VERITY +endif # TARGET_BOOTIMAGE_USE_EXT2 / BOARD_CUSTOM_BOOTIMG_MK else # TARGET_NO_KERNEL # HACK: The top-level targets depend on the bootimage. 
Not all targets @@ -938,7 +941,7 @@ define build-recoveryimage-target @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} endef -$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \ +$(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ $(INSTALLED_RAMDISK_TARGET) \ $(INSTALLED_BOOTIMAGE_TARGET) \ $(INTERNAL_RECOVERYIMAGE_FILES) \ @@ -949,6 +952,8 @@ $(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \ $(RECOVERY_INSTALL_OTA_KEYS) $(call build-recoveryimage-target, $@) +endif # BOARD_CUSTOM_BOOTIMG_MK + recovery_patch_path := $(call intermediates-dir-for,PACKAGING,recovery_patch) ota_temp_root := $(PRODUCT_OUT)/ota_temp $(RECOVERY_PATCH_INSTALL): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION) @@ -1011,6 +1016,7 @@ endif $(hide) ./build/tools/releasetools/make_recovery_patch $(ota_temp_root) $(ota_temp_root) $(hide) cp --remove-destination $(ota_temp_root)/SYSTEM/bin/install-recovery.sh $(TARGET_OUT)/bin/install-recovery.sh $(hide) cp --remove-destination $(ota_temp_root)/SYSTEM/recovery-from-boot.p $(TARGET_OUT)/recovery-from-boot.p + $(RECOVERY_RESOURCE_ZIP): $(INSTALLED_RECOVERYIMAGE_TARGET) $(hide) mkdir -p $(dir $@) $(hide) find $(TARGET_RECOVERY_ROOT_OUT)/res -type f | sort | zip -0qrj $@ -@ @@ -1034,6 +1040,11 @@ ifeq ($(BOARD_NAND_SPARE_SIZE),) BOARD_NAND_SPARE_SIZE := 64 endif +ifdef BOARD_CUSTOM_BOOTIMG_MK +include $(BOARD_CUSTOM_BOOTIMG_MK) +endif + + # ----------------------------------------------------------------- # system image # From e264e27bff24268996a37b6273906d6f4070463b Mon Sep 17 00:00:00 2001 From: James Roberts-Thomson Date: Tue, 16 Apr 2013 15:53:39 +1200 Subject: [PATCH 098/309] Consolidate bash version checks There were two checks for bash version, the 2nd was superfluous; this patch removes the duplicate checks, and shifts the bash version check to being back after the "are we in bash" check. 
Change-Id: I57ff8c1fedce80f739c06643d2976d2c1465db1b Conflicts: envsetup.sh --- envsetup.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 32f300155..c264fab40 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -301,18 +301,18 @@ function settitle() function addcompletions() { - local T dir f - # Keep us from trying to run in something that isn't bash. if [ -z "${BASH_VERSION}" ]; then return fi # Keep us from trying to run in bash that's too old. - if [ ${BASH_VERSINFO[0]} -lt 3 ]; then + if [ "${BASH_VERSINFO[0]}" -lt 4 ] ; then return fi + local T dir f + dirs="sdk/bash_completion vendor/slim/bash_completion" for dir in $dirs; do if [ -d ${dir} ]; then From 57c099324518344483ea1ee3e7f709351a57d590 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Tue, 19 Mar 2013 17:50:37 +0530 Subject: [PATCH 099/309] envsetup: export ANDROID_BUILD_TOP earlier Change-Id: Ia3497f61f4e9aaeeb793cecd3b4f488f0a48181a --- envsetup.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index c264fab40..c72024421 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -258,7 +258,6 @@ function set_stuff_for_environment() setpaths set_sequence_number - export ANDROID_BUILD_TOP=$(gettop) # With this environment variable new GCC can apply colors to warnings/errors export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' export ASAN_OPTIONS=detect_leaks=0 @@ -2241,3 +2240,5 @@ done unset f addcompletions + +export ANDROID_BUILD_TOP=$(gettop) From 1713474db5d1fe32aa9bf6e2431b2c187da57326 Mon Sep 17 00:00:00 2001 From: Kyle Ladd Date: Wed, 11 Sep 2013 20:43:42 -0400 Subject: [PATCH 100/309] build: fix bash completion sourcing Sourcing functions in files from a function within a file being sourced was giving bash a hard time. This fixes 'repo' command tab completions. 
Change-Id: Iac1b3078e20749fb474ed1270e0886cf435e24d9 Conflicts: envsetup.sh --- envsetup.sh | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index c72024421..8f01f479f 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -298,29 +298,19 @@ function settitle() fi } -function addcompletions() +function check_bash_version() { # Keep us from trying to run in something that isn't bash. if [ -z "${BASH_VERSION}" ]; then - return + return 1 fi # Keep us from trying to run in bash that's too old. if [ "${BASH_VERSINFO[0]}" -lt 4 ] ; then - return + return 2 fi - local T dir f - - dirs="sdk/bash_completion vendor/slim/bash_completion" - for dir in $dirs; do - if [ -d ${dir} ]; then - for f in `/bin/ls ${dir}/[a-z]*.bash 2> /dev/null`; do - echo "including $f" - . $f - done - fi - done + return 0 } function choosetype() @@ -2239,6 +2229,17 @@ do done unset f -addcompletions +# Add completions +check_bash_version && { + dirs="sdk/bash_completion vendor/slim/bash_completion" + for dir in $dirs; do + if [ -d ${dir} ]; then + for f in `/bin/ls ${dir}/[a-z]*.bash 2> /dev/null`; do + echo "including $f" + . $f + done + fi + done +} export ANDROID_BUILD_TOP=$(gettop) From bf9e75f9750476b7aeb6123617d2abc15f4d9489 Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Fri, 1 Jun 2012 13:30:25 -0700 Subject: [PATCH 101/309] Fix file-size stats on OSX Kernel makefiles that the stat on the system is GNU stat. GNU stat uses the "-c" option to specify format. Darwin stat uses the "-f" option to specify format. This discrepency will cause kernel build breaks. On my system, I symlink stat to GNU stat. This causes the "get-file-size" define to fail. The fix for this is to detect "gstat", aka GNU stat, and use that appropriately. 
Change-Id: I987c155b7dc3ff14ffe6da40edf834ca34b7df75 Fix up the get-file-size function that was broken due to the prior commit that made it utilize gnu stat (gstat) Change-Id: I24bba2bfcb509ad1ad76d2260eedd685ba45c393 --- core/combo/HOST_darwin-x86.mk | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/combo/HOST_darwin-x86.mk b/core/combo/HOST_darwin-x86.mk index e77fd218f..26e206925 100644 --- a/core/combo/HOST_darwin-x86.mk +++ b/core/combo/HOST_darwin-x86.mk @@ -102,5 +102,10 @@ endef # $(1): The file to check define get-file-size -stat -f "%z" $(1) +GSTAT=$(which gstat) ; \ +if [ ! -z "$GSTAT" ]; then \ +gstat -c "%s" $(1) ; \ +else \ +stat -f "%z" $(1) ; \ +fi endef From 674c352686c24eaa599b6e9832904ff180fa9589 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Sat, 8 Nov 2014 23:03:53 +0000 Subject: [PATCH 102/309] Remove nulls from the edify generator. These would break the scripts. WTF are they doing here? Change-Id: I0c52335a27f7ae974279fde1c2461b2dba24d263 --- tools/releasetools/edify_generator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 8822d3d31..5d8368d63 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -118,7 +118,7 @@ def AssertOlderBuild(self, timestamp, timestamp_text): def AssertDevice(self, device): """Assert that the device identifier is the given string.""" cmd = ('assert(' + - ' || \0'.join(['get_device_compatible("%s") == "OK" || getprop("ro.build.product") == "%s"' + ' || '.join(['get_device_compatible("%s") == "OK" || getprop("ro.build.product") == "%s"' % (i, i) for i in device.split(",")]) + ' || abort("This package is for device: %s; ' + 'this device is " + getprop("ro.product.device") + ".");' + @@ -128,7 +128,7 @@ def AssertDevice(self, device): def AssertSomeBootloader(self, *bootloaders): """Asert that the bootloader version is one of 
*bootloaders.""" cmd = ("assert(" + - " ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,) + " || ".join(['getprop("ro.bootloader") == "%s"' % (b,) for b in bootloaders]) + ");") self.script.append(self.WordWrap(cmd)) @@ -136,7 +136,7 @@ def AssertSomeBootloader(self, *bootloaders): def AssertSomeBaseband(self, *basebands): """Assert that the baseband version is one of *basebands.""" cmd = ("assert(" + - " ||\0".join(['getprop("ro.baseband") == "%s"' % (b,) + " || ".join(['getprop("ro.baseband") == "%s"' % (b,) for b in basebands]) + ");") self.script.append(self._WordWrap(cmd)) From fb6b4b921615554210450daf25ead9677ff1360a Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Tue, 30 Jul 2013 20:04:32 +0530 Subject: [PATCH 103/309] Add auditd Change-Id: I54717edbeb9677e53bdd01e6519edd30c387bfe8 --- target/product/embedded.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/target/product/embedded.mk b/target/product/embedded.mk index 25a8975c3..a92cd34e3 100644 --- a/target/product/embedded.mk +++ b/target/product/embedded.mk @@ -74,6 +74,7 @@ PRODUCT_PACKAGES += \ # SELinux packages PRODUCT_PACKAGES += \ + auditd \ sepolicy \ file_contexts \ seapp_contexts \ From 631edb00e75ce4c6932b3a683ceb2a8e29d9e000 Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Sat, 8 Nov 2014 23:07:16 -0800 Subject: [PATCH 104/309] build: Automatically set QCOM audio, display, media variants * Device platform should determing the HAL that ought to be used. This commit forces QCOM_HARDWARE to select the -caf HAL variants, which are then broken down by platform within the variant path. 
Change-Id: I6fc7a3def7b93112f034a3b89552f302727cdbf8 --- core/qcom_target.mk | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index ea91257a0..a6b572adb 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -1,5 +1,10 @@ # Target-specific configuration +# Populate the qcom hardware variants in the project pathmap. +define qcom-set-path-variant +$(call project-set-path-variant,qcom-$(2),TARGET_QCOM_$(1)_VARIANT,hardware/qcom/$(2)) +endef + # Enable DirectTrack on QCOM legacy boards ifeq ($(BOARD_USES_QCOM_HARDWARE),true) @@ -12,21 +17,22 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) endif # Enable DirectTrack for legacy targets - ifneq ($(filter caf bfam,$(TARGET_QCOM_AUDIO_VARIANT)),) - ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) - TARGET_GLOBAL_CFLAGS += -DQCOM_DIRECTTRACK - TARGET_GLOBAL_CPPFLAGS += -DQCOM_DIRECTTRACK - endif + ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) + TARGET_GLOBAL_CFLAGS += -DQCOM_DIRECTTRACK + TARGET_GLOBAL_CPPFLAGS += -DQCOM_DIRECTTRACK endif -endif -# Populate the qcom hardware variants in the project pathmap. 
-define qcom-set-path-variant -$(call project-set-path-variant,qcom-$(2),TARGET_QCOM_$(1)_VARIANT,hardware/qcom/$(2)) -endef -$(call qcom-set-path-variant,AUDIO,audio) +$(call project-set-path,qcom-audio,hardware/qcom/audio-caf) $(call qcom-set-path-variant,CAMERA,camera) -$(call qcom-set-path-variant,DISPLAY,display) +$(call project-set-path,qcom-display,hardware/qcom/display-caf) $(call qcom-set-path-variant,GPS,gps) -$(call qcom-set-path-variant,MEDIA,media) +$(call project-set-path,qcom-media,hardware/qcom/media-caf) $(call qcom-set-path-variant,SENSORS,sensors) +else +$(call project-set-path,qcom-audio,hardware/qcom/audio) +$(call qcom-set-path-variant,CAMERA,camera) +$(call project-set-path,qcom-display,hardware/qcom/display) +$(call qcom-set-path-variant,GPS,gps) +$(call project-set-path,qcom-media,hardware/qcom/media) +$(call qcom-set-path-variant,SENSORS,sensors) +endif From 35c1def2a9ce47013d684401206390d775e7a12e Mon Sep 17 00:00:00 2001 From: Dan Pasanen Date: Tue, 11 Nov 2014 13:54:31 -0600 Subject: [PATCH 105/309] base: build javax.btobex Change-Id: Ia82f74e7eb0f5758732399064277cf18a1fc15d9 --- target/product/core_tiny.mk | 1 + target/product/generic_no_telephony.mk | 1 + 2 files changed, 2 insertions(+) diff --git a/target/product/core_tiny.mk b/target/product/core_tiny.mk index 0a9227526..9684b11d8 100644 --- a/target/product/core_tiny.mk +++ b/target/product/core_tiny.mk @@ -55,6 +55,7 @@ PRODUCT_PACKAGES += \ ip6tables \ iptables \ gatekeeperd \ + javax.btobex \ keystore \ keystore.default \ libOpenMAXAL \ diff --git a/target/product/generic_no_telephony.mk b/target/product/generic_no_telephony.mk index f6ccd2a54..2a2a0f021 100644 --- a/target/product/generic_no_telephony.mk +++ b/target/product/generic_no_telephony.mk @@ -32,6 +32,7 @@ PRODUCT_PACKAGES := \ PRODUCT_PACKAGES += \ clatd \ clatd.conf \ + javax.btobex \ pppd \ screenrecord From 398b1ed1444f6b4f54310c1d52836b16938b4fa8 Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Wed, 16 Nov 
2011 16:45:03 -0800 Subject: [PATCH 106/309] recovery: build uncompressed ramdisk Change-Id: I6027868864ec2696f20705faf4c68c684e7ef459 Signed-off-by: Chirayu Desai --- core/Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/Makefile b/core/Makefile index e229a58fb..0e45516a5 100644 --- a/core/Makefile +++ b/core/Makefile @@ -815,6 +815,7 @@ recovery_sepolicy := $(call intermediates-dir-for,ETC,sepolicy.recovery)/sepolic recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img recovery_build_prop := $(intermediate_system_build_prop) +recovery_uncompressed_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.cpio recovery_resources_common := $(call include-path-for, recovery)/res # Set recovery_density to the density bucket of the device. @@ -930,6 +931,11 @@ define build-recoveryimage-target $(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \ > $(TARGET_RECOVERY_ROOT_OUT)/default.prop $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) + +$(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) + @echo -e ${CL_CYN}"----- Making uncompressed recovery ramdisk ------"${CL_RST} + $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $@ + $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) From 03fc62f31de254672f93a4b4e1bf2ca164f859af Mon Sep 17 00:00:00 2001 From: Danesh Mondegarian Date: Thu, 7 Nov 2013 18:44:02 -0800 Subject: [PATCH 107/309] Remove HomeSample from builds As good as it looks, we already have a launcher (Launcher3) Change-Id: I87cac70eec30fd62d6f7ecb7777caa9d240fe0ee --- target/product/core_base.mk | 1 - 1 file changed, 1 deletion(-) diff --git 
a/target/product/core_base.mk b/target/product/core_base.mk index 03d33e1b6..e4e3a7beb 100644 --- a/target/product/core_base.mk +++ b/target/product/core_base.mk @@ -23,7 +23,6 @@ PRODUCT_PROPERTY_OVERRIDES := \ PRODUCT_PACKAGES += \ ContactsProvider \ DefaultContainerService \ - Home \ TelephonyProvider \ UserDictionaryProvider \ atrace \ From 1f5245be6f63b99d6ce8c949652967ed8a1cf81c Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Tue, 14 Oct 2014 12:43:28 -0700 Subject: [PATCH 108/309] product_config: Set SLIM_BUILD priority above TARGET_BUILD_APPS Even if we are doing TARGET_BUILD_APPS, we may want to use a cm target for building. Change-Id: Ic8053bc679fa8b726be90e611aeaacdf1850cabc Conflicts: core/product_config.mk --- core/product_config.mk | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/core/product_config.mk b/core/product_config.mk index 5f5adc9b8..20805cbb6 100644 --- a/core/product_config.mk +++ b/core/product_config.mk @@ -179,19 +179,20 @@ include $(BUILD_SYSTEM)/node_fns.mk include $(BUILD_SYSTEM)/product.mk include $(BUILD_SYSTEM)/device.mk -ifneq ($(strip $(TARGET_BUILD_APPS)),) -# An unbundled app build needs only the core product makefiles. -all_product_configs := $(call get-product-makefiles,\ - $(SRC_TARGET_DIR)/product/AndroidProducts.mk) +# A SLIM build needs only the CM product makefiles. +ifneq ($(SLIM_BUILD),) + all_product_configs := $(shell ls device/*/$(SLIM_BUILD)/slim.mk) else - ifneq ($(SLIM_BUILD),) - all_product_configs := $(shell ls device/*/$(SLIM_BUILD)/cm.mk) + ifneq ($(strip $(TARGET_BUILD_APPS)),) + # An unbundled app build needs only the core product makefiles. + all_product_configs := $(call get-product-makefiles,\ + $(SRC_TARGET_DIR)/product/AndroidProducts.mk) else # Read in all of the product definitions specified by the AndroidProducts.mk # files in the tree. 
all_product_configs := $(get-all-product-makefiles) - endif -endif + endif # TARGET_BUILD_APPS +endif # SLIM_BUILD ifeq ($(SLIM_BUILD),) # Find the product config makefile for the current product. From 62b33b5de35d4d01c0434f2a7b4ffd3426c1ea51 Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Mon, 10 Nov 2014 14:34:48 -0800 Subject: [PATCH 109/309] build: Enable QCOM_BSP, QC_AV with QCOM_HARDWARE Change-Id: Ibf9dd35272521109fea52e46bacf6e1e3074ed6a --- core/qcom_target.mk | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index a6b572adb..476548f1b 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -11,16 +11,19 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) TARGET_GLOBAL_CFLAGS += -DQCOM_HARDWARE TARGET_GLOBAL_CPPFLAGS += -DQCOM_HARDWARE - ifeq ($(TARGET_USES_QCOM_BSP),true) - TARGET_GLOBAL_CFLAGS += -DQCOM_BSP - TARGET_GLOBAL_CPPFLAGS += -DQCOM_BSP - endif + TARGET_USES_QCOM_BSP := true + TARGET_GLOBAL_CFLAGS += -DQCOM_BSP + TARGET_GLOBAL_CPPFLAGS += -DQCOM_BSP + + TARGET_ENABLE_QC_AV_ENHANCEMENTS := true # Enable DirectTrack for legacy targets + ifneq ($(filter msm7x30 msm8660 msm8960,$(TARGET_BOARD_PLATFORM)),) ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) TARGET_GLOBAL_CFLAGS += -DQCOM_DIRECTTRACK TARGET_GLOBAL_CPPFLAGS += -DQCOM_DIRECTTRACK endif + endif $(call project-set-path,qcom-audio,hardware/qcom/audio-caf) $(call qcom-set-path-variant,CAMERA,camera) From 6107826b9ccdcc7bdc58f5d91cadbf8ac495e9f6 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Mon, 17 Nov 2014 22:06:31 +0000 Subject: [PATCH 110/309] pathmap: Point QC HAL pathmaps directly to the source Directly map to the actual HAL directory, including the board platform. This lets project-path-for point directly to the respective HALs. 
Change-Id: Ic4ed61bbdea9d0b5683502bf84a8410e76858527 --- core/qcom_target.mk | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 476548f1b..ae94150f8 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -25,17 +25,17 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) endif endif -$(call project-set-path,qcom-audio,hardware/qcom/audio-caf) +$(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,CAMERA,camera) -$(call project-set-path,qcom-display,hardware/qcom/display-caf) +$(call project-set-path,qcom-display,hardware/qcom/display-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,GPS,gps) -$(call project-set-path,qcom-media,hardware/qcom/media-caf) +$(call project-set-path,qcom-media,hardware/qcom/media-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,SENSORS,sensors) else -$(call project-set-path,qcom-audio,hardware/qcom/audio) +$(call project-set-path,qcom-audio,hardware/qcom/audio/default) $(call qcom-set-path-variant,CAMERA,camera) -$(call project-set-path,qcom-display,hardware/qcom/display) +$(call project-set-path,qcom-display,hardware/qcom/display/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,GPS,gps) -$(call project-set-path,qcom-media,hardware/qcom/media) +$(call project-set-path,qcom-media,hardware/qcom/media/default) $(call qcom-set-path-variant,SENSORS,sensors) endif From b9d7ab69802a2a7c95d994d9f3c2a3df6e303bc8 Mon Sep 17 00:00:00 2001 From: nuclearmistake Date: Sun, 27 Apr 2014 21:59:42 -0400 Subject: [PATCH 111/309] Colorize javac errors You know those multi-line ones that are frequently interspersed with other buld output? The ones that don't even have the word "error" in them to search for? This makes them red. 
ps2: fix jar_check failures cause by leaving empty stderr files in intermediates dirs this is probably not the best place to store stderr before colorizing it if javac exits non-zero, but it seems much lighter than mkdiring a bunch of temp directories or using sed to mangle the paths to point to per-intermediates directory unique temporary file names Change-Id: I3b9b7d8a0c76958588ac1603b6742987d6dde54c Signed-off-by: nuclearmistake --- core/definitions.mk | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/definitions.mk b/core/definitions.mk index acf11a808..de8fc1215 100644 --- a/core/definitions.mk +++ b/core/definitions.mk @@ -1803,7 +1803,11 @@ $(hide) if [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq ] ; the -extdirs "" -d $(PRIVATE_CLASS_INTERMEDIATES_DIR) \ $(PRIVATE_JAVACFLAGS) \ \@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq \ - || ( rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR) ; exit 41 ) \ + 2>$(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr \ + && rm -f $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr \ + || ( if [ -e $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr ]; then \ + echo -e ${CL_RED}"`cat $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr`"${CL_RST} 1>&2; \ + fi; rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR); exit 41 ) \ fi $(if $(PRIVATE_JAVA_LAYERS_FILE), $(hide) build/tools/java-layers.py \ $(PRIVATE_JAVA_LAYERS_FILE) \@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq,) @@ -2302,8 +2306,9 @@ endef ########################################################### ## Commands to call Proguard ########################################################### +@echo -e ${CL_CYN}"Copying:"${CL_RST}" $@" +@echo -e ${CL_GRN}"Proguard:"${CL_RST}" $@" define transform-jar-to-proguard -@echo Proguard: $@ $(hide) $(PROGUARD) -injars $< -outjars $@ $(PRIVATE_PROGUARD_FLAGS) \ $(addprefix -injars , $(PRIVATE_EXTRA_INPUT_JAR)) endef From 401368bec2c3cf5d03194b27a8c16c9b1bf0b07e Mon Sep 17 00:00:00 2001 From: nuclearmistake Date: Thu, 15 
May 2014 10:30:26 -0400 Subject: [PATCH 112/309] colorize non-fatal javac stderr yellow (and actually display it... at all) woops! Signed-off-by: nuclearmistake Change-Id: I984f428022a68a825aa041866e8d459bd6611f71 --- core/definitions.mk | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/definitions.mk b/core/definitions.mk index de8fc1215..e1c4a1b6e 100644 --- a/core/definitions.mk +++ b/core/definitions.mk @@ -1804,10 +1804,12 @@ $(hide) if [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq ] ; the $(PRIVATE_JAVACFLAGS) \ \@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq \ 2>$(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr \ - && rm -f $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr \ - || ( if [ -e $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr ]; then \ + && ( [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr ] && \ + echo -e ${CL_YLW}"`cat $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr`"${CL_RST} 1>&2; \ + rm -f $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr ) \ + || ( [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr ] && \ echo -e ${CL_RED}"`cat $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr`"${CL_RST} 1>&2; \ - fi; rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR); exit 41 ) \ + rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR); exit 41 ) \ fi $(if $(PRIVATE_JAVA_LAYERS_FILE), $(hide) build/tools/java-layers.py \ $(PRIVATE_JAVA_LAYERS_FILE) \@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq,) From 7c0bca0bf05e66a44c65a00c8e986825a017c1d2 Mon Sep 17 00:00:00 2001 From: Oliver Middleton Date: Sun, 12 Oct 2014 22:11:26 +0100 Subject: [PATCH 113/309] build: Fix some colored build issues * Fix some broken lines in definitions.mk * Finish adding colors to product-graph.mk Change-Id: I235a60c967b1f10ec6dd1cac25740badbd3b64c9 --- core/definitions.mk | 6 +++--- core/tasks/product-graph.mk | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/core/definitions.mk b/core/definitions.mk index e1c4a1b6e..631ef4d80 100644 --- a/core/definitions.mk 
+++ b/core/definitions.mk @@ -2277,19 +2277,19 @@ endef # Copy a prebuilt file to a target location. define transform-prebuilt-to-target -@echo -e "$(if $(PRIVATE_IS_HOST_MODULE),host,target) "${CL_CYN}"Prebuilt:"${CL_RST}" $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_CYN}"$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target) endef # Copy a prebuilt file to a target location, using zipalign on it. define transform-prebuilt-to-target-with-zipalign -@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) "${CL_CYN}"Prebuilt APK:"${CL_RST}" $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_CYN}"$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt APK:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target-with-zipalign) endef # Copy a prebuilt file to a target location, stripping "# comment" comments. define transform-prebuilt-to-target-strip-comments -@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) "${CL_CYN}"Prebuilt:"${CL_RST}" $(PRIVATE_MODULE) ($@)" +@echo -e ${CL_CYN}"$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt:"${CL_RST}" $(PRIVATE_MODULE) ($@)" $(copy-file-to-target-strip-comments) endef diff --git a/core/tasks/product-graph.mk b/core/tasks/product-graph.mk index 94a1dc7fb..38f193674 100644 --- a/core/tasks/product-graph.mk +++ b/core/tasks/product-graph.mk @@ -70,7 +70,7 @@ $(products_graph): PRIVATE_PRODUCTS := $(really_all_products) $(products_graph): PRIVATE_PRODUCTS_FILTER := $(products_list) $(products_graph): $(this_makefile) - @echo Product graph DOT: $@ for $(PRIVATE_PRODUCTS_FILTER) + @echo -e ${CL_GRN}"Product graph DOT:"${CL_RST}" $@ for $(PRIVATE_PRODUCTS_FILTER)" $(hide) echo 'digraph {' > $@.in $(hide) echo 'graph [ ratio=.5 ];' >> $@.in $(hide) $(foreach p,$(PRIVATE_PRODUCTS), \ @@ -89,7 +89,7 @@ endef # $(1) product file define transform-product-debug $(OUT_DIR)/products/$(strip $(1)).txt: $(this_makefile) - @echo Product debug info file: $$@ + @echo -e ${CL_GRN}"Product debug info 
file:"${CL_RST}" $$@" $(hide) rm -f $$@ $(hide) mkdir -p $$(dir $$@) $(hide) echo 'FILE=$(strip $(1))' >> $$@ @@ -123,7 +123,7 @@ $(call product-debug-filename, $(p)): \ $(OUT_DIR)/products/$(strip $(1)).txt \ build/tools/product_debug.py \ $(this_makefile) - @echo Product debug html file: $$@ + @echo -e ${CL_GRN}"Product debug html file:"${CL_RST}" $$@" $(hide) mkdir -p $$(dir $$@) $(hide) cat $$< | build/tools/product_debug.py > $$@ endef @@ -139,7 +139,7 @@ $(products_pdf): $(products_graph) dot -Tpdf -Nshape=box -o $@ $< $(products_svg): $(products_graph) $(product_debug_files) - @echo Product graph SVG: $@ + @echo -e ${CL_GRN}"Product graph SVG:"${CL_RST}" $@" dot -Tsvg -Nshape=box -o $@ $< product-graph: $(products_pdf) $(products_svg) From 7294834505fa29fced76f336b0086603d6202e9c Mon Sep 17 00:00:00 2001 From: myfluxi Date: Sun, 23 Mar 2014 21:54:27 +0100 Subject: [PATCH 114/309] build: Degreenify java Change-Id: Iac348407997f7e52844af0494ee59762df0819d7 --- core/java.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/java.mk b/core/java.mk index 3dc8ffcf6..f99e6ef40 100644 --- a/core/java.mk +++ b/core/java.mk @@ -432,7 +432,7 @@ endif # Keep a copy of the jar just before proguard processing. $(full_classes_jar): $(full_classes_emma_jar) | $(ACP) - @echo -e ${CL_GRN}"Copying:"${CL_GRN}" $@" + @echo -e ${CL_GRN}"Copying:"${CL_RST}" $@" $(hide) $(ACP) -fp $< $@ # Run proguard if necessary, otherwise just copy the file. 
From e6489e5a4ff8d5192f7f1f23909b517ba9999f09 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Thu, 1 May 2014 16:33:02 +0300 Subject: [PATCH 115/309] build: Don't build video editor libraries * These are gone in L Change-Id: I7dae9c6736590be8cd080195da4062258ee4a424 --- target/product/generic_no_telephony.mk | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/target/product/generic_no_telephony.mk b/target/product/generic_no_telephony.mk index 2a2a0f021..0f9935a20 100644 --- a/target/product/generic_no_telephony.mk +++ b/target/product/generic_no_telephony.mk @@ -37,12 +37,7 @@ PRODUCT_PACKAGES += \ screenrecord PRODUCT_PACKAGES += \ - librs_jni \ - libvideoeditor_jni \ - libvideoeditor_core \ - libvideoeditor_osal \ - libvideoeditor_videofilters \ - libvideoeditorplayer \ + librs_jni PRODUCT_PACKAGES += \ audio.primary.default \ From a33da1217c5fa1646006b8986798e6cce43b42ef Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Tue, 11 Nov 2014 13:09:58 +0200 Subject: [PATCH 116/309] Remove deprecated custom squisher code * No devices should be using that anymore Change-Id: I752cf5297d35a842a923a8008fbec53e8fae97e0 Conflicts: core/Makefile --- core/Makefile | 3 --- 1 file changed, 3 deletions(-) diff --git a/core/Makefile b/core/Makefile index 0e45516a5..e179d84db 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1730,9 +1730,6 @@ SLIM_TARGET_PACKAGE := $(PRODUCT_OUT)/Slim-$(SLIM_VERSION).zip .PHONY: otapackage bacon otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) bacon: otapackage -ifneq ($(TARGET_CUSTOM_RELEASETOOL),) - $(error TARGET_CUSTOM_RELEASETOOL is deprecated) -endif $(hide) ln -f $(INTERNAL_OTA_PACKAGE_TARGET) $(SLIM_TARGET_PACKAGE) $(hide) $(MD5SUM) $(SLIM_TARGET_PACKAGE) > $(SLIM_TARGET_PACKAGE).md5sum @echo -e ${CL_CYN}"Package Complete: $(SLIM_TARGET_PACKAGE)"${CL_RST} From bfe2970f598cb3a2434f17e68883b9f01df33c8f Mon Sep 17 00:00:00 2001 From: Arne Coucheron Date: Sat, 3 Mar 2012 22:53:06 +0100 Subject: [PATCH 117/309] Fix 
inheritance of PRODUCT_PROPERTY_OVERRIDES for CM audio files Otherwise the ones set in SLIM vendor will be overridden by these. Change-Id: I3f87dfd009d8ddfd48972c15770599742b12c4af Author: Abhisek Devkota Remove extra product property overrides This appears to clash with vendor/cm/common mk's for overrides; causes a broken default alarm (and OnTheHunt is so bleh). Change-Id: I5ec990d3c5ad56b573c45f1a3b20169c989851a0 --- target/product/core_base.mk | 4 ---- target/product/full_base.mk | 3 +-- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/target/product/core_base.mk b/target/product/core_base.mk index e4e3a7beb..86fb36cd7 100644 --- a/target/product/core_base.mk +++ b/target/product/core_base.mk @@ -16,10 +16,6 @@ # Note that components added here will be also shared in PDK. Components # that should not be in PDK should be added in lower level like core.mk. -PRODUCT_PROPERTY_OVERRIDES := \ - ro.config.notification_sound=OnTheHunt.ogg \ - ro.config.alarm_alert=Alarm_Classic.ogg - PRODUCT_PACKAGES += \ ContactsProvider \ DefaultContainerService \ diff --git a/target/product/full_base.mk b/target/product/full_base.mk index 7d19685f1..7c7c86955 100644 --- a/target/product/full_base.mk +++ b/target/product/full_base.mk @@ -29,8 +29,7 @@ PRODUCT_PACKAGES := \ # Additional settings used in all AOSP builds PRODUCT_PROPERTY_OVERRIDES := \ - ro.config.ringtone=Ring_Synth_04.ogg \ - ro.config.notification_sound=pixiedust.ogg + ro.com.android.dateformat=MM-dd-yyyy # Put en_US first in the list, so make it default. 
PRODUCT_LOCALES := en_US From fa2974a37de41856b2e9b339dee9aaa5cd20e17d Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Sun, 9 Nov 2014 17:27:51 +0530 Subject: [PATCH 118/309] Allow specifying kernel toolchain and prefix Change-Id: Ib75cfe772e073f2196455f3e3dfba247f6b3feff --- core/tasks/kernel.mk | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 4d519a07c..b0ab1def9 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -112,12 +112,19 @@ KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr KERNEL_MODULES_INSTALL := system KERNEL_MODULES_OUT := $(TARGET_OUT)/lib/modules +ifeq ($(KERNEL_TOOLCHAIN),) +KERNEL_TOOLCHAIN := $(ARM_EABI_TOOLCHAIN) +endif +ifeq ($(KERNEL_TOOLCHAIN_PREFIX),) +KERNEL_TOOLCHAIN_PREFIX := arm-eabi- +endif + define mv-modules mdpath=`find $(KERNEL_MODULES_OUT) -type f -name modules.order`;\ if [ "$$mdpath" != "" ];then\ mpath=`dirname $$mdpath`;\ ko=`find $$mpath/kernel -type f -name *.ko`;\ - for i in $$ko; do $(ARM_EABI_TOOLCHAIN)/arm-eabi-strip --strip-unneeded $$i;\ + for i in $$ko; do $(KERNEL_TOOLCHAIN)/$(KERNEL_TOOLCHAIN_PREFIX)strip --strip-unneeded $$i;\ mv $$i $(KERNEL_MODULES_OUT)/; done;\ fi endef @@ -135,7 +142,7 @@ ifeq ($(TARGET_ARCH),arm) # Check that the executable is here. ccache := $(strip $(wildcard $(ccache))) endif - ARM_CROSS_COMPILE:=CROSS_COMPILE="$(ccache) $(ARM_EABI_TOOLCHAIN)/arm-eabi-" + ARM_CROSS_COMPILE:=CROSS_COMPILE="$(ccache) $(KERNEL_TOOLCHAIN)/$(KERNEL_TOOLCHAIN_PREFIX)" ccache = endif From fe4111f29b2806b1ad20a81916cb07323b12c2bc Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Tue, 28 Oct 2014 16:12:15 -0700 Subject: [PATCH 119/309] build: recovery: Support adding device-specific items (cdesai): Also add the relevant parts from koush's commit 7fd5a89df5ec3961c9aedfd2afad50d6d2561056 "Add initial support for charge mode. also support recovery/root in the device tree to easily copy props into recovery images." 
Change-Id: I25205e68282680932917016646dabd1abadbfce6 --- core/Makefile | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/core/Makefile b/core/Makefile index e179d84db..7f19f13cf 100644 --- a/core/Makefile +++ b/core/Makefile @@ -845,6 +845,16 @@ else recovery_font := $(call include-path-for, recovery)/fonts/12x22.png endif +ifneq ($(TARGET_RECOVERY_DEVICE_DIRS),) +recovery_root_private := $(strip \ + $(foreach d,$(TARGET_RECOVERY_DEVICE_DIRS), $(wildcard $(d)/recovery/root))) +else +recovery_root_private := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery/root)) +endif +ifneq ($(recovery_root_private),) +recovery_root_deps := $(shell find $(recovery_root_private) -type f) +endif + recovery_resources_private := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery/res)) recovery_resource_deps := $(shell find $(recovery_resources_common) \ $(recovery_resources_private) -type f) @@ -923,6 +933,8 @@ define build-recoveryimage-target $(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/* $(hide) cp -rf $(recovery_resources_common)/* $(TARGET_RECOVERY_ROOT_OUT)/res $(hide) cp -f $(recovery_font) $(TARGET_RECOVERY_ROOT_OUT)/res/images/font.png + $(hide) $(foreach item,$(recovery_root_private), \ + cp -rf $(item) $(TARGET_RECOVERY_OUT)/) $(hide) $(foreach item,$(recovery_resources_private), \ cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/) $(hide) $(foreach item,$(recovery_fstab), \ @@ -953,7 +965,7 @@ $(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ $(INTERNAL_RECOVERYIMAGE_FILES) \ $(recovery_initrc) $(recovery_sepolicy) $(recovery_kernel) \ $(INSTALLED_2NDBOOTLOADER_TARGET) \ - $(recovery_build_prop) $(recovery_resource_deps) \ + $(recovery_build_prop) $(recovery_resource_deps) $(recovery_root_deps) \ $(recovery_fstab) \ $(RECOVERY_INSTALL_OTA_KEYS) $(call build-recoveryimage-target, $@) From 07c5a284aedf71a4bc38d641e2d93f34d2c59868 Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Tue, 28 Jan 2014 15:51:51 -0600 Subject: [PATCH 
120/309] Allow devices to specify certain ro. props via TARGET_UNIFIED_DEVICE This modifies buildinfo.sh to not set model, name, description, and fingerprint in build.prop. These can then be set via custom init lib. This also fixes ota_from_target_files to function without these props being preset. Change-Id: I945b2c38e60b207d2d0f82b34f4f230ac21b0657 --- core/Makefile | 7 ++- tools/buildinfo.sh | 16 +++--- tools/releasetools/ota_from_target_files | 73 +++++++++++++++--------- 3 files changed, 61 insertions(+), 35 deletions(-) diff --git a/core/Makefile b/core/Makefile index 7f19f13cf..b3a3e9f5d 100644 --- a/core/Makefile +++ b/core/Makefile @@ -247,6 +247,7 @@ endif TARGET_CPU_ABI="$(TARGET_CPU_ABI)" \ TARGET_CPU_ABI2="$(TARGET_CPU_ABI2)" \ TARGET_AAPT_CHARACTERISTICS="$(TARGET_AAPT_CHARACTERISTICS)" \ + TARGET_UNIFIED_DEVICE="$(TARGET_UNIFIED_DEVICE)" \ $(PRODUCT_BUILD_PROP_OVERRIDES) \ bash $(BUILDINFO_SH) >> $@ $(hide) $(foreach file,$(system_prop_file), \ @@ -1723,6 +1724,10 @@ else $(INTERNAL_OTA_PACKAGE_TARGET): override_device := $(TARGET_OTA_ASSERT_DEVICE) endif +ifneq ($(TARGET_UNIFIED_DEVICE),) + $(INTERNAL_OTA_PACKAGE_TARGET): override_prop := --override_prop=true +endif + $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) @echo "$(OTA_FROM_TARGET_SCRIPT)" > $(PRODUCT_OUT)/ota_script_path @echo "$(override_device)" > $(PRODUCT_OUT)/ota_override_device @@ -1733,7 +1738,7 @@ $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) -p $(HOST_OUT) \ -k $(KEY_CERT_PAIR) \ --backup=$(backuptool) \ - --override_device=$(override_device) \ + --override_device=$(override_device) $(override_prop) \ $(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \ $(BUILT_TARGET_FILES_PACKAGE) $@ diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh index 85d60a33e..185690d9d 100755 --- a/tools/buildinfo.sh +++ b/tools/buildinfo.sh @@ -20,10 +20,8 @@ echo "ro.build.user=$USER" echo "ro.build.host=`hostname`" echo 
"ro.build.tags=$BUILD_VERSION_TAGS" echo "ro.build.flavor=$TARGET_BUILD_FLAVOR" -echo "ro.product.model=$PRODUCT_MODEL" echo "ro.product.brand=$PRODUCT_BRAND" echo "ro.product.name=$PRODUCT_NAME" -echo "ro.product.device=$TARGET_DEVICE" echo "ro.product.board=$TARGET_BOOTLOADER_BOARD_NAME" # These values are deprecated, use "ro.product.cpu.abilist" @@ -48,11 +46,15 @@ echo "ro.board.platform=$TARGET_BOARD_PLATFORM" echo "# ro.build.product is obsolete; use ro.product.device" echo "ro.build.product=$TARGET_DEVICE" -echo "# Do not try to parse description, fingerprint, or thumbprint" -echo "ro.build.description=$PRIVATE_BUILD_DESC" -echo "ro.build.fingerprint=$BUILD_FINGERPRINT" -if [ -n "$BUILD_THUMBPRINT" ] ; then - echo "ro.build.thumbprint=$BUILD_THUMBPRINT" +if [ "$TARGET_UNIFIED_DEVICE" == "" ] ; then + echo "ro.product.model=$PRODUCT_MODEL" + echo "ro.product.device=$TARGET_DEVICE" + echo "# Do not try to parse description, fingerprint, or thumbprint" + echo "ro.build.description=$PRIVATE_BUILD_DESC" + echo "ro.build.fingerprint=$BUILD_FINGERPRINT" + if [ -n "$BUILD_THUMBPRINT" ] ; then + echo "ro.build.thumbprint=$BUILD_THUMBPRINT" + fi fi echo "ro.build.characteristics=$TARGET_AAPT_CHARACTERISTICS" diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index cdc211feb..dc5995fcd 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -96,6 +96,10 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package --override_device Override device-specific asserts. Can be a comma-separated list. + --override_prop + Override build.prop items with custom vendor init. 
+ Enabled when TARGET_UNIFIED_DEVICE is defined in BoardConfig + """ import sys @@ -137,6 +141,7 @@ OPTIONS.full_radio = False OPTIONS.full_bootloader = False OPTIONS.backuptool = False OPTIONS.override_device = 'auto' +OPTIONS.override_prop = False def MostPopularKey(d, default): """Given a dict, return the key corresponding to the largest @@ -417,7 +422,10 @@ def AppendAssertions(script, info_dict, oem_dict=None): oem_props = info_dict.get("oem_fingerprint_properties") if oem_props is None or len(oem_props) == 0: if OPTIONS.override_device == "auto": - device = GetBuildProp("ro.product.device", info_dict) + if OPTIONS.override_prop: + device = GetBuildProp("ro.build.product", info_dict) + else: + device = GetBuildProp("ro.product.device", info_dict) else: device = OPTIONS.override_device script.AssertDevice(device) @@ -518,13 +526,18 @@ def WriteFullOTAPackage(input_zip, output_zip): oem_dict = common.LoadDictionaryFromLines( open(OPTIONS.oem_source).readlines()) - metadata = { - "post-build": CalculateFingerprint(oem_props, oem_dict, - OPTIONS.info_dict), - "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, - OPTIONS.info_dict), - "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict), - } + if OPTIONS.override_prop: + metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.info_dict), + } + else: + metadata = {"post-build": CalculateFingerprint( + oem_props, oem_dict, OPTIONS.info_dict), + "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, + OPTIONS.info_dict), + "post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.info_dict), + } device_specific = common.DeviceSpecificParams( input_zip=input_zip, @@ -779,12 +792,16 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip): source_version, OPTIONS.target_info_dict, fstab=OPTIONS.source_info_dict["fstab"]) - metadata = { - "pre-device": GetBuildProp("ro.product.device", - OPTIONS.source_info_dict), - 
"post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.target_info_dict), - } + if OPTIONS.override_prop: + metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.target_info_dict), + } + else: + metadata = {"pre-device": GetBuildProp("ro.product.device", + OPTIONS.source_info_dict), + "post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.target_info_dict), + } device_specific = common.DeviceSpecificParams( source_zip=source_zip, @@ -1204,20 +1221,19 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip): else: vendor_diff = None - target_fp = CalculateFingerprint(oem_props, oem_dict, - OPTIONS.target_info_dict) - source_fp = CalculateFingerprint(oem_props, oem_dict, - OPTIONS.source_info_dict) + if not OPTIONS.override_prop: + target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict) + source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict) - if oem_props is None: - script.AssertSomeFingerprint(source_fp, target_fp) - else: - script.AssertSomeThumbprint( - GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), - GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) + if oem_props is None: + script.AssertSomeFingerprint(source_fp, target_fp) + else: + script.AssertSomeThumbprint( + GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), + GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) - metadata["pre-build"] = source_fp - metadata["post-build"] = target_fp + metadata["pre-build"] = source_fp + metadata["post-build"] = target_fp source_boot = common.GetBootableImage( "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", @@ -1583,6 +1599,8 @@ def main(argv): OPTIONS.backuptool = bool(a.lower() == 'true') elif o in ("--override_device"): OPTIONS.override_device = a + elif o in ("--override_prop"): + OPTIONS.override_prop = bool(a.lower() == 'true') else: return False return True @@ -1608,7 +1626,8 @@ def main(argv): "verify", 
"no_fallback_to_full", "backup=", - "override_device="], + "override_device=", + "override_prop="], ], extra_option_handler=option_handler) if len(args) != 2: From b53d16e5291a0d76281440fc2da0bddb26fa4bda Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Thu, 28 Aug 2014 18:29:14 -0500 Subject: [PATCH 121/309] Edify: Add/Update abort messages for some asserts * If a bootloader or baseband assert is not satisfied, print the versions that are supported by the package as well as the version on the device. Change-Id: I958d49281c51bd4e60d596a727bb94cfc4a21909 --- tools/releasetools/edify_generator.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 5d8368d63..cc5e33b9f 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -126,10 +126,13 @@ def AssertDevice(self, device): self.script.append(cmd) def AssertSomeBootloader(self, *bootloaders): - """Asert that the bootloader version is one of *bootloaders.""" + """Assert that the bootloader version is one of *bootloaders.""" cmd = ("assert(" + " || ".join(['getprop("ro.bootloader") == "%s"' % (b,) for b in bootloaders]) + + ' || abort("This package supports bootloader(s): ' + + ", ".join(["%s" % (b,) for b in bootloaders]) + + '; this device has bootloader " + getprop("ro.bootloader") + ".");' + ");") self.script.append(self.WordWrap(cmd)) @@ -138,6 +141,9 @@ def AssertSomeBaseband(self, *basebands): cmd = ("assert(" + " || ".join(['getprop("ro.baseband") == "%s"' % (b,) for b in basebands]) + + ' || abort("This package supports baseband(s): ' + + ", ".join(["%s" % (b,) for b in basebands]) + + '; this device has baseband " + getprop("ro.baseband") + ".");' + ");") self.script.append(self._WordWrap(cmd)) From db7a5a0013b14ff550fd6570e7248d3b87817ff0 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Thu, 17 Nov 2011 00:13:29 +0000 Subject: [PATCH 122/309] ota: Build uboot 
bootable images when necessary Conflicts: core/Makefile tools/releasetools/common.py Change-Id: I18996bb1f5377bbf369fb41774aaa1f5821e7d1f --- core/Makefile | 18 +++++++ tools/releasetools/common.py | 93 +++++++++++++++++++++++------------- 2 files changed, 77 insertions(+), 34 deletions(-) diff --git a/core/Makefile b/core/Makefile index b3a3e9f5d..36ec5c6d3 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1536,6 +1536,21 @@ else $(BUILT_TARGET_FILES_PACKAGE): tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS) endif +ifeq ($(BOARD_USES_UBOOT_MULTIIMAGE),true) + + ZIP_SAVE_UBOOTIMG_ARGS := -A ARM -O Linux -T multi -C none -n Image + + BOARD_UBOOT_ENTRY := $(strip $(BOARD_UBOOT_ENTRY)) + ifdef BOARD_UBOOT_ENTRY + ZIP_SAVE_UBOOTIMG_ARGS += -e $(BOARD_UBOOT_ENTRY) + endif + BOARD_UBOOT_LOAD := $(strip $(BOARD_UBOOT_LOAD)) + ifdef BOARD_UBOOT_LOAD + ZIP_SAVE_UBOOTIMG_ARGS += -a $(BOARD_UBOOT_LOAD) + endif + +endif + # Depending on the various images guarantees that the underlying # directories are up-to-date. 
$(BUILT_TARGET_FILES_PACKAGE): \ @@ -1600,6 +1615,9 @@ ifdef BOARD_KERNEL_BASE endif ifdef BOARD_KERNEL_PAGESIZE $(hide) echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/BOOT/pagesize +endif +ifdef ZIP_SAVE_UBOOTIMG_ARGS + $(hide) echo "$(ZIP_SAVE_UBOOTIMG_ARGS)" > $(zip_root)/BOOT/ubootargs endif $(hide) $(foreach t,$(INSTALLED_RADIOIMAGE_TARGET),\ mkdir -p $(zip_root)/RADIO; \ diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 94eacc24e..36df1c29e 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -331,44 +331,69 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None): assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,) assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,) - # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set - mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" - - cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")] - - fn = os.path.join(sourcedir, "second") - if os.access(fn, os.F_OK): - cmd.append("--second") - cmd.append(fn) - - fn = os.path.join(sourcedir, "cmdline") + """check if uboot is requested""" + fn = os.path.join(sourcedir, "ubootargs") if os.access(fn, os.F_OK): - cmd.append("--cmdline") - cmd.append(open(fn).read().rstrip("\n")) + cmd = ["mkimage"] + for argument in open(fn).read().rstrip("\n").split(" "): + cmd.append(argument) + cmd.append("-d") + cmd.append(os.path.join(sourcedir, "kernel")+":"+ramdisk_img.name) + cmd.append(img.name) - fn = os.path.join(sourcedir, "base") - if os.access(fn, os.F_OK): - cmd.append("--base") - cmd.append(open(fn).read().rstrip("\n")) - - fn = os.path.join(sourcedir, "pagesize") - if os.access(fn, os.F_OK): - cmd.append("--pagesize") - cmd.append(open(fn).read().rstrip("\n")) - - args = info_dict.get("mkbootimg_args", None) - if args and args.strip(): - cmd.extend(shlex.split(args)) - - img_unsigned = None - if info_dict.get("vboot", None): - img_unsigned = 
tempfile.NamedTemporaryFile() - cmd.extend(["--ramdisk", ramdisk_img.name, - "--output", img_unsigned.name]) else: - cmd.extend(["--ramdisk", ramdisk_img.name, + # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set + mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" + cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")] + + fn = os.path.join(sourcedir, "second") + if os.access(fn, os.F_OK): + cmd.append("--second") + cmd.append(fn) + + fn = os.path.join(sourcedir, "cmdline") + if os.access(fn, os.F_OK): + cmd.append("--cmdline") + cmd.append(open(fn).read().rstrip("\n")) + + fn = os.path.join(sourcedir, "base") + if os.access(fn, os.F_OK): + cmd.append("--base") + cmd.append(open(fn).read().rstrip("\n")) + + fn = os.path.join(sourcedir, "tagsaddr") + if os.access(fn, os.F_OK): + cmd.append("--tags-addr") + cmd.append(open(fn).read().rstrip("\n")) + + fn = os.path.join(sourcedir, "ramdisk_offset") + if os.access(fn, os.F_OK): + cmd.append("--ramdisk_offset") + cmd.append(open(fn).read().rstrip("\n")) + + fn = os.path.join(sourcedir, "dt_args") + if os.access(fn, os.F_OK): + cmd.append("--dt") + cmd.append(open(fn).read().rstrip("\n")) + + fn = os.path.join(sourcedir, "pagesize") + if os.access(fn, os.F_OK): + cmd.append("--pagesize") + cmd.append(open(fn).read().rstrip("\n")) + + args = info_dict.get("mkbootimg_args", None) + if args and args.strip(): + cmd.extend(shlex.split(args)) + + img_unsigned = None + if info_dict.get("vboot", None): + img_unsigned = tempfile.NamedTemporaryFile() + cmd.extend(["--ramdisk", ramdisk_img.name, + "--output", img_unsigned.name]) + else: + cmd.extend(["--ramdisk", ramdisk_img.name, "--output", img.name]) - + p = Run(cmd, stdout=subprocess.PIPE) p.communicate() assert p.returncode == 0, "mkbootimg of %s image failed" % ( From bf1e6d4a76798c05c81412b0b535bdd15f0fb8d4 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Fri, 21 Nov 2014 23:38:04 -0800 Subject: [PATCH 123/309] Enable QCOM_BSP_LEGACY flag for 
pre-8974 targets Change-Id: I895f1b1cbad0d260e1bca87ccb61e6194de023fd --- core/qcom_target.mk | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index ae94150f8..f057337dc 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -23,6 +23,9 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) TARGET_GLOBAL_CFLAGS += -DQCOM_DIRECTTRACK TARGET_GLOBAL_CPPFLAGS += -DQCOM_DIRECTTRACK endif + # Enable legacy graphics functions + LOCAL_GLOBAL_CFLAGS += -DQCOM_BSP_LEGACY + LOCAL_GLOBAL_CPPFLAGS += -DQCOM_BSP_LEGACY endif $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) From 292921ce2ca62f02d858a70b7206b257228e3553 Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Wed, 13 Aug 2014 12:27:38 -0700 Subject: [PATCH 124/309] build: Handle custom boot images properly When a pre-built image should be used, it should be stored in the target files zip so that it can be used with external signing processes. Original-Change-Id: I2661af9ac58af30bb9314b552775046d3abf44e0 Change-Id: I10b3bd0bb33489b8ffb26d16d002f8dd6ff405ad --- core/Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/Makefile b/core/Makefile index 36ec5c6d3..71121880d 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1628,6 +1628,12 @@ endif @# Contents of the data image $(hide) $(call package_files-copy-root, \ $(TARGET_OUT_DATA),$(zip_root)/DATA) +ifdef BOARD_CUSTOM_BOOTIMG + @# Prebuilt boot images + $(hide) mkdir -p $(zip_root)/BOOTABLE_IMAGES + $(hide) $(ACP) $(INSTALLED_BOOTIMAGE_TARGET) $(zip_root)/BOOTABLE_IMAGES/ + $(hide) $(ACP) $(INSTALLED_RECOVERYIMAGE_TARGET) $(zip_root)/BOOTABLE_IMAGES/ +endif ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE @# Contents of the vendor image $(hide) $(call package_files-copy-root, \ From d5bca1fe5ce94923a80bc0bf43d062cce8555505 Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Wed, 1 Oct 2014 07:33:51 -0700 Subject: [PATCH 125/309] build: Use project pathmap for recovery Change-Id: 
I6339ac77b899a43db21261d587252b65cb58ad79 --- core/Makefile | 8 ++++---- core/config.mk | 4 ++++ core/pathmap.mk | 1 - 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/core/Makefile b/core/Makefile index 71121880d..323ad5345 100644 --- a/core/Makefile +++ b/core/Makefile @@ -811,13 +811,13 @@ ifdef INSTALLED_RECOVERYIMAGE_TARGET INTERNAL_RECOVERYIMAGE_FILES := $(filter $(TARGET_RECOVERY_OUT)/%, \ $(ALL_DEFAULT_INSTALLED_MODULES)) -recovery_initrc := $(call include-path-for, recovery)/etc/init.rc +recovery_initrc := $(call project-path-for,recovery)/etc/init.rc recovery_sepolicy := $(call intermediates-dir-for,ETC,sepolicy.recovery)/sepolicy.recovery recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img recovery_build_prop := $(intermediate_system_build_prop) recovery_uncompressed_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.cpio -recovery_resources_common := $(call include-path-for, recovery)/res +recovery_resources_common := $(call project-path-for,recovery)/res # Set recovery_density to the density bucket of the device. recovery_density := unknown @@ -841,9 +841,9 @@ endif # its private recovery resources. ifneq (,$(filter xxxhdpi 560dpi xxhdpi 400dpi xhdpi,$(recovery_density))) -recovery_font := $(call include-path-for, recovery)/fonts/18x32.png +recovery_font := $(call project-path-for,recovery)/fonts/18x32.png else -recovery_font := $(call include-path-for, recovery)/fonts/12x22.png +recovery_font := $(call project-path-for,recovery)/fonts/12x22.png endif ifneq ($(TARGET_RECOVERY_DEVICE_DIRS),) diff --git a/core/config.mk b/core/config.mk index fe0c41620..2fb92303a 100644 --- a/core/config.mk +++ b/core/config.mk @@ -161,6 +161,10 @@ include $(BUILD_SYSTEM)/envsetup.mk # See envsetup.mk for a description of SCAN_EXCLUDE_DIRS FIND_LEAVES_EXCLUDES := $(addprefix --prune=, $(OUT_DIR) $(SCAN_EXCLUDE_DIRS) .repo .git) +# General entries for project pathmap. 
Any entries listed here should +# be device and hardware independent. +$(call project-set-path-variant,recovery,RECOVERY_VARIANT,bootable/recovery) + -include vendor/extra/BoardConfigExtra.mk # The build system exposes several variables for where to find the kernel # headers: diff --git a/core/pathmap.mk b/core/pathmap.mk index 699eac15e..a23aafca4 100644 --- a/core/pathmap.mk +++ b/core/pathmap.mk @@ -41,7 +41,6 @@ pathmap_INCL := \ libstdc++:bionic/libstdc++/include \ mkbootimg:system/core/mkbootimg \ opengl-tests-includes:frameworks/native/opengl/tests/include \ - recovery:bootable/recovery \ system-core:system/core/include \ audio:system/media/audio/include \ audio-effects:system/media/audio_effects/include \ From 32bc0e5047ec7312657a0e4218d051b1c53a6c79 Mon Sep 17 00:00:00 2001 From: Pawit Pornkitprasan Date: Wed, 19 Nov 2014 23:22:41 +0700 Subject: [PATCH 126/309] build: clean up otasigcheck - Only mount and unmount /data if it was originally unmounted - Don't use comparison hack, just check the result of the script Change-Id: I4a22485d315cf91e95ce578907c49f5fa3a03222 --- tools/releasetools/edify_generator.py | 4 +--- tools/releasetools/ota_from_target_files | 5 +++++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index cc5e33b9f..a335a2e1d 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -168,9 +168,7 @@ def ValidateSignatures(self, command): self.script.append('package_extract_file("system/bin/otasigcheck.sh", "/tmp/otasigcheck.sh");') self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");') self.script.append('set_metadata("/tmp/otasigcheck.sh", "uid", 0, "gid", 0, "mode", 0755);') - self.script.append('run_program("/tmp/otasigcheck.sh");') - ## Hax: a failure from run_program doesn't trigger an abort, so have it change the key value and check for "INVALID" - 
self.script.append('sha1_check(read_file("/tmp/releasekey"),"7241e92725436afc79389d4fc2333a2aa8c20230") && abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");') + self.script.append('run_program("/tmp/otasigcheck.sh") == "0" || abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");') def ShowProgress(self, frac, dur): """Update the progress bar, advancing it over 'frac' over the next diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index dc5995fcd..04b348504 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -625,9 +625,14 @@ else if get_stage("%(bcb_dev)s") == "3/3" then if block_based: common.ZipWriteStr(output_zip, "system/bin/otasigcheck.sh", ""+input_zip.read("SYSTEM/bin/otasigcheck.sh")) + + script.AppendExtra("if is_mounted(\"/data\") then") + script.ValidateSignatures("data") + script.AppendExtra("else") script.Mount("/data") script.ValidateSignatures("data") script.Unmount("/data") + script.AppendExtra("endif;") if "selinux_fc" in OPTIONS.info_dict: WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip) From 4462345ecead86f949ad17d63a909a32d3b57651 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Thu, 19 Jun 2014 01:45:15 +0100 Subject: [PATCH 127/309] ota: Let devices specify their own recovery-from-boot.p installer Some devices apply transformations to the installed images, making the sha1 checksums fail (or, worse, generating invalid images). If "/system/etc/recovery-transform.sh" exists, run that instead, passing the expected sizes and checksums as arguments in the form of recovery-transform.sh A direct emulation of the standard patcher could look like this, transformations should be added as needed. 
---------------- RECSIZE=$1 RECSHA1=$2 BOOTSIZE=$3 BOOTSHA1=$4 dd if=/dev/block/platform/msm_sdcc.1/by-name/recovery of=$C/recovery.img dd if=/dev/block/platform/msm_sdcc.1/by-name/boot of=$C/boot.img if ! applypatch -c EMMC:$C/recovery.img:$RECSIZE:$RECSHA1; then log -t recovery "Installing new recovery image" applypatch -b /system/etc/recovery-resource.dat EMMC:$C/boot.img:$BOOTSIZE:$BOOTSHA1 EMMC:$C/recovery.img $RECSHA1 $RECSIZE $BOOTSHA1:/system/recovery-from-boot.p || exit 1 else log -t recovery "Recovery image already installed" fi ---------------- Conflicts: tools/releasetools/ota_from_target_files Change-Id: Ie601841ca1cdad6b8f3b16e593d2718a92e8ca09 --- tools/releasetools/common.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 36df1c29e..4afed84c7 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -1432,6 +1432,10 @@ def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img, return sh = """#!/system/bin/sh +if [ -f /system/etc/recovery-transform.sh ]; then + exec sh /system/etc/recovery-transform.sh %(recovery_size)d %(recovery_sha1)s %(boot_size)d %(boot_sha1)s +fi + if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed" else From 76842622361126a369cd325af87a6390ebe94a0e Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Fri, 21 Nov 2014 00:46:52 -0600 Subject: [PATCH 128/309] build: Ensure /system unmounted at install start Account for the possibility that a user has mounted system manually before installing the FullOTA package. 
Change-Id: I97d0540676a7ce98762545e76ecb1a7f6335d204 --- tools/releasetools/ota_from_target_files | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 04b348504..d5d1a395a 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -603,6 +603,7 @@ else if get_stage("%(bcb_dev)s") == "3/3" then script.Print("Target: %s" % CalculateFingerprint( oem_props, oem_dict, OPTIONS.info_dict)) + script.AppendExtra("ifelse(is_mounted(\"/system\"), unmount(\"/system\"));") device_specific.FullOTA_InstallBegin() if OPTIONS.backuptool: From b8bbea3dd5d6c6e72d86cdfdc460e9cdbd8b5f0b Mon Sep 17 00:00:00 2001 From: M1cha Date: Tue, 25 Nov 2014 15:30:48 +0100 Subject: [PATCH 129/309] releasetools: ota_from_target_files: add FullOTA_PostValidate Change-Id: I152412049f90fd546d4516cc064238c3192be553 --- tools/releasetools/common.py | 5 +++++ tools/releasetools/ota_from_target_files | 2 ++ 2 files changed, 7 insertions(+) diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 4afed84c7..5fb071011 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -1017,6 +1017,11 @@ def FullOTA_InstallEnd(self): used to install the image for the device's baseband processor.""" return self._DoCall("FullOTA_InstallEnd") + def FullOTA_PostValidate(self): + """Called after installing and validating /system; typically this is + used to resize the system partition after a block based installation.""" + return self._DoCall("FullOTA_PostValidate") + def IncrementalOTA_Assertions(self): """Called after emitting the block of assertions at the top of an incremental OTA package. 
Implementations can add whatever diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index d5d1a395a..edfaedf4b 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -705,6 +705,8 @@ else if get_stage("%(bcb_dev)s") == "3/3" then if block_based: script.Unmount("/system") + device_specific.FullOTA_PostValidate() + if OPTIONS.backuptool: script.ShowProgress(0.02, 10) if block_based: From 31f32000597f01ce92ac9af844d0c766298bf31e Mon Sep 17 00:00:00 2001 From: Brandon Bennett Date: Sat, 19 Nov 2011 16:02:04 -0700 Subject: [PATCH 130/309] Add ext2, ext3, and vfat to releasetools Support some non-standard partition that may be found in recovery.fstab Conflicts: tools/releasetools/common.py Change-Id: I0026a1d3ae5d432db10150d1a15f7bc8e7a6054b --- tools/releasetools/common.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 5fb071011..3eac6bf9a 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -1372,7 +1372,10 @@ def _CheckFirstBlock(self, script): "ext4": "EMMC", "emmc": "EMMC", "f2fs": "EMMC", - "squashfs": "EMMC" + "squashfs": "EMMC", + "ext2": "EMMC", + "ext3": "EMMC", + "vfat": "EMMC" } } def GetTypeAndDevice(mount_point, info): From 3474175cdadfa2b0507aea973ddc8151a0dfda0d Mon Sep 17 00:00:00 2001 From: Cristoforo Cataldo Date: Sun, 30 Nov 2014 02:06:42 +0100 Subject: [PATCH 131/309] core: Enable -mcpu=cortex-a8 flag for Scorpion cpu variant To be used with http://review.cyanogenmod.org/#/c/77758/ Change-Id: I7ecc4707fa45bd7098165615c0521a12c85fb087 --- core/combo/arch/arm/armv7-a-neon.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/combo/arch/arm/armv7-a-neon.mk b/core/combo/arch/arm/armv7-a-neon.mk index 99f17aa6d..c924c55a9 100644 --- a/core/combo/arch/arm/armv7-a-neon.mk +++ b/core/combo/arch/arm/armv7-a-neon.mk @@ -19,7 +19,7 @@ 
ifneq (,$(filter cortex-a15 krait denver,$(TARGET_$(combo_2nd_arch_prefix)CPU_VA arch_variant_ldflags := \ -Wl,--no-fix-cortex-a8 else -ifeq ($(strip $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)),cortex-a8) +ifneq (,$(filter cortex-a8 scorpion,$(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT))) arch_variant_cflags := -mcpu=cortex-a8 arch_variant_ldflags := \ -Wl,--fix-cortex-a8 From bb70e390393e9046aa66520f978e53160c759622 Mon Sep 17 00:00:00 2001 From: Cristoforo Cataldo Date: Sun, 30 Nov 2014 06:12:35 +0100 Subject: [PATCH 132/309] core: Enable -mcpu=cortex-a9 flag for Cortex-A9 cpu variant Change-Id: I9294a518bcdc21ccbae72eadd9f3c1a12982d028 --- core/combo/arch/arm/armv7-a-neon.mk | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/combo/arch/arm/armv7-a-neon.mk b/core/combo/arch/arm/armv7-a-neon.mk index c924c55a9..915f12ea5 100644 --- a/core/combo/arch/arm/armv7-a-neon.mk +++ b/core/combo/arch/arm/armv7-a-neon.mk @@ -19,6 +19,9 @@ ifneq (,$(filter cortex-a15 krait denver,$(TARGET_$(combo_2nd_arch_prefix)CPU_VA arch_variant_ldflags := \ -Wl,--no-fix-cortex-a8 else +ifeq ($(strip $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)),cortex-a9) + arch_variant_cflags := -mcpu=cortex-a9 +else ifneq (,$(filter cortex-a8 scorpion,$(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT))) arch_variant_cflags := -mcpu=cortex-a8 arch_variant_ldflags := \ From 6483f411e79667273dce0694024a7ee83a3671bd Mon Sep 17 00:00:00 2001 From: Arne Coucheron Date: Sun, 30 Nov 2014 09:13:15 +0100 Subject: [PATCH 133/309] core: armv7-a-neon.mk: Add missing endif Change-Id: Id6f9c952d01d3c980115a52605d9c86038b3b5bd --- core/combo/arch/arm/armv7-a-neon.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/core/combo/arch/arm/armv7-a-neon.mk b/core/combo/arch/arm/armv7-a-neon.mk index 915f12ea5..2822624c7 100644 --- a/core/combo/arch/arm/armv7-a-neon.mk +++ b/core/combo/arch/arm/armv7-a-neon.mk @@ -39,6 +39,7 @@ else endif endif endif +endif arch_variant_cflags += \ -mfloat-abi=softfp \ From 
91347e2835d22225836b88a3f4cf3e586cb854ab Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Tue, 2 Dec 2014 02:01:56 -0500 Subject: [PATCH 134/309] Fix ro.build.product not found by ota_from_target_files in some cases After I5dccba2172dade3dacc55d832a2042fce306b5f5 it was possible that if override_prop was set and override_device was not set the script was looking for a prop that did not exist. Change-Id: I444a33de5bcb59f129bdcf631c2a6540c5926545 --- core/Makefile | 3 +++ tools/releasetools/ota_from_target_files | 21 +++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/core/Makefile b/core/Makefile index 323ad5345..d8788a60c 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1750,6 +1750,9 @@ endif ifneq ($(TARGET_UNIFIED_DEVICE),) $(INTERNAL_OTA_PACKAGE_TARGET): override_prop := --override_prop=true + ifeq ($(TARGET_OTA_ASSERT_DEVICE),) + $(INTERNAL_OTA_PACKAGE_TARGET): override_device := $(TARGET_DEVICE) + endif endif $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index edfaedf4b..6d4c559fd 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -422,10 +422,7 @@ def AppendAssertions(script, info_dict, oem_dict=None): oem_props = info_dict.get("oem_fingerprint_properties") if oem_props is None or len(oem_props) == 0: if OPTIONS.override_device == "auto": - if OPTIONS.override_prop: - device = GetBuildProp("ro.build.product", info_dict) - else: - device = GetBuildProp("ro.product.device", info_dict) + device = GetBuildProp("ro.product.device", info_dict) else: device = OPTIONS.override_device script.AssertDevice(device) @@ -1204,12 +1201,16 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip): oem_dict = common.LoadDictionaryFromLines( open(OPTIONS.oem_source).readlines()) - metadata = { - "pre-device": GetOemProperty("ro.product.device", oem_props, 
oem_dict, - OPTIONS.source_info_dict), - "post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.target_info_dict), - } + if OPTIONS.override_prop: + metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.target_info_dict), + } + else: + metadata = {"pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, + OPTIONS.source_info_dict), + "post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.target_info_dict), + } device_specific = common.DeviceSpecificParams( source_zip=source_zip, From 52c52a2cff050e16633304cf367e3aff45b9ef97 Mon Sep 17 00:00:00 2001 From: Alvin Francis Date: Mon, 1 Dec 2014 13:13:40 -0400 Subject: [PATCH 135/309] Fix libelf path Fixes kernel build Change-Id: Ic6cdb1734d7b865491e8e61403d32db4acafe514 Signed-off-by: Alvin Francis --- core/tasks/kernel.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index b0ab1def9..e6be63a41 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -147,7 +147,7 @@ ifeq ($(TARGET_ARCH),arm) endif ifeq ($(HOST_OS),darwin) - MAKE_FLAGS := C_INCLUDE_PATH=$(ANDROID_BUILD_TOP)/external/elfutils/libelf + MAKE_FLAGS := C_INCLUDE_PATH=$(ANDROID_BUILD_TOP)/external/elfutils/0.153/libelf/ endif ifeq ($(TARGET_KERNEL_MODULES),) From 003d7b3954fca5e95886cbeeb074f98a65bf2f4f Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sun, 7 Dec 2014 16:42:28 -0800 Subject: [PATCH 136/309] build: Parse TARGET_COPY_FILES_OVERRIDES late in the setup Change-Id: Ie3e4a168ff224e3b65e4627c0757e8813a02167f Conflicts: core/config.mk --- core/config.mk | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/core/config.mk b/core/config.mk index 2fb92303a..2db541b73 100644 --- a/core/config.mk +++ b/core/config.mk @@ -695,6 +695,17 @@ RSCOMPAT_NO_USAGEIO_API_LEVELS := 8 9 10 11 12 13 # Rules for QCOM targets include $(BUILD_SYSTEM)/qcom_target.mk +# We might want to skip items listed in PRODUCT_COPY_FILES based on +# various 
target flags. This is useful for replacing a binary module with one +# built from source. This should be a list of destination files under $OUT +# +TARGET_COPY_FILES_OVERRIDES := \ + $(addprefix %:, $(strip $(TARGET_COPY_FILES_OVERRIDES))) + +ifneq ($(TARGET_COPY_FILES_OVERRIDES),) + PRODUCT_COPY_FILES := $(filter-out $(TARGET_COPY_FILES_OVERRIDES), $(PRODUCT_COPY_FILES)) +endif + ifneq ($(SLIM_BUILD),) ## We need to be sure the global selinux policies are included ## last, to avoid accidental resetting by device configs From 2fda555209bf6e4c4f010900cea4e9a4309d6e6e Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Tue, 9 Dec 2014 07:28:17 +0100 Subject: [PATCH 137/309] build: Fix QCOM_BSP_LEGACY cflags Change-Id: I880f32892d9e082e3ba92878414f3cb3c6f08066 --- core/qcom_target.mk | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index f057337dc..feaf9655e 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -5,7 +5,6 @@ define qcom-set-path-variant $(call project-set-path-variant,qcom-$(2),TARGET_QCOM_$(1)_VARIANT,hardware/qcom/$(2)) endef -# Enable DirectTrack on QCOM legacy boards ifeq ($(BOARD_USES_QCOM_HARDWARE),true) TARGET_GLOBAL_CFLAGS += -DQCOM_HARDWARE @@ -23,9 +22,8 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) TARGET_GLOBAL_CFLAGS += -DQCOM_DIRECTTRACK TARGET_GLOBAL_CPPFLAGS += -DQCOM_DIRECTTRACK endif - # Enable legacy graphics functions - LOCAL_GLOBAL_CFLAGS += -DQCOM_BSP_LEGACY - LOCAL_GLOBAL_CPPFLAGS += -DQCOM_BSP_LEGACY + # Enable legacy graphics functions + TARGET_USES_QCOM_BSP_LEGACY := true endif $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) From c0dacee3e18f4bc5b3f66539a59d73657eb2897e Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Sun, 14 Dec 2014 01:47:02 -0500 Subject: [PATCH 138/309] Fix QCOM_BSP_LEGACY Checks in the Android.mk files do not enable the cflags for .h files, causing a build that crashes constantly. 
Change-Id: I315c760488445629fda860ba70066417c7d68b8b --- core/qcom_target.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index feaf9655e..17147e5f1 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -24,6 +24,8 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) endif # Enable legacy graphics functions TARGET_USES_QCOM_BSP_LEGACY := true + TARGET_GLOBAL_CFLAGS += -DQCOM_BSP_LEGACY + TARGET_GLOBAL_CPPFLAGS += -DQCOM_BSP_LEGACY endif $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) From f0788cc67aad87bb99663d4a040819ba1fe3b3ec Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Mon, 15 Dec 2014 18:44:31 -0800 Subject: [PATCH 139/309] build: Clean up QCOM flag definitions Change-Id: I66bca2db83260ccd65b82e540ee9f7961f00b030 --- core/qcom_target.mk | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 17147e5f1..c715687f8 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -7,27 +7,25 @@ endef ifeq ($(BOARD_USES_QCOM_HARDWARE),true) - TARGET_GLOBAL_CFLAGS += -DQCOM_HARDWARE - TARGET_GLOBAL_CPPFLAGS += -DQCOM_HARDWARE + qcom_flags := -DQCOM_HARDWARE + qcom_flags += -DQCOM_BSP TARGET_USES_QCOM_BSP := true - TARGET_GLOBAL_CFLAGS += -DQCOM_BSP - TARGET_GLOBAL_CPPFLAGS += -DQCOM_BSP - TARGET_ENABLE_QC_AV_ENHANCEMENTS := true # Enable DirectTrack for legacy targets ifneq ($(filter msm7x30 msm8660 msm8960,$(TARGET_BOARD_PLATFORM)),) - ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) - TARGET_GLOBAL_CFLAGS += -DQCOM_DIRECTTRACK - TARGET_GLOBAL_CPPFLAGS += -DQCOM_DIRECTTRACK - endif + ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) + qcom_flags += -DQCOM_DIRECTTRACK + endif # Enable legacy graphics functions TARGET_USES_QCOM_BSP_LEGACY := true - TARGET_GLOBAL_CFLAGS += -DQCOM_BSP_LEGACY - TARGET_GLOBAL_CPPFLAGS += -DQCOM_BSP_LEGACY + qcom_flags += -DQCOM_BSP_LEGACY endif + TARGET_GLOBAL_CFLAGS += $(qcom_flags) + 
TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) + $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,CAMERA,camera) $(call project-set-path,qcom-display,hardware/qcom/display-caf/$(TARGET_BOARD_PLATFORM)) From 8b2f2f56ff9e79c0d8925b0e6fa1feba62d03a65 Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Mon, 15 Dec 2014 18:45:19 -0800 Subject: [PATCH 140/309] build: Add QCOM flags to Clang CFLAGS Change-Id: I56f0d4106f5d3d27c1ace744d30c1c81f0052bbd --- core/qcom_target.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index c715687f8..950375dc9 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -25,6 +25,8 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) TARGET_GLOBAL_CFLAGS += $(qcom_flags) TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) + CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags) + CLANG_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,CAMERA,camera) From 7d3b96df34ba3dca1857cab7df1a9f5f7b5a6f3c Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Tue, 16 Dec 2014 16:31:38 +0200 Subject: [PATCH 141/309] build: Remove unused qcom CFLAG Change-Id: I6ead3e57899bcb007d4d284901f918a65a9e6926 --- core/qcom_target.mk | 1 - 1 file changed, 1 deletion(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 950375dc9..5c5635738 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -19,7 +19,6 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) qcom_flags += -DQCOM_DIRECTTRACK endif # Enable legacy graphics functions - TARGET_USES_QCOM_BSP_LEGACY := true qcom_flags += -DQCOM_BSP_LEGACY endif From de2f6f14c5cac904b8841370efd154ce22780c05 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Wed, 7 May 2014 17:28:49 +0100 Subject: [PATCH 142/309] Store the base64 release key in the OTA zips This can be directly grepped in pre-existing package.xml tables to make sure we're 
not trying to update to a differently signed build Change-Id: I7528a8e7c484ea9209cd665b9263328ae834586a --- core/Makefile | 5 +++++ tools/releasetools/ota_from_target_files | 3 ++- tools/releasetools/sign_target_files_apks | 8 ++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/core/Makefile b/core/Makefile index d8788a60c..b83884878 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1689,6 +1689,11 @@ endif $(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt) $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ ./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root) +ifdef PRODUCT_DEFAULT_DEV_CERTIFICATE + $(hide) build/tools/getb64key.py $(PRODUCT_DEFAULT_DEV_CERTIFICATE).x509.pem > $(zip_root)/META/releasekey.txt +else + $(hide) build/tools/getb64key.py $(DEFAULT_SYSTEM_DEV_CERTIFICATE).x509.pem > $(zip_root)/META/releasekey.txt +endif @# Zip everything up, preserving symlinks $(hide) (cd $(zip_root) && zip -qry ../$(notdir $@) .) 
@# Run fs_config on all the system, vendor, boot ramdisk, diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 6d4c559fd..6c5a8fbda 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -742,6 +742,8 @@ endif; script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary) WriteMetadata(metadata, output_zip) + common.ZipWriteStr(output_zip, "META-INF/org/cyanogenmod/releasekey", + ""+input_zip.read("META/releasekey.txt")) def WritePolicyConfig(file_name, output_zip): common.ZipWrite(output_zip, file_name, os.path.basename(file_name)) @@ -1559,7 +1561,6 @@ endif; WriteMetadata(metadata, output_zip) - def main(argv): def option_handler(o, a): diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks index 4538196ea..290fdb990 100755 --- a/tools/releasetools/sign_target_files_apks +++ b/tools/releasetools/sign_target_files_apks @@ -385,6 +385,14 @@ def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info): common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", new_recovery_keys) + # Save the base64 key representation in the update for key-change + # validations + p = common.Run(["python", "build/tools/getb64key.py", mapped_keys[0]], + stdout=subprocess.PIPE) + data, _ = p.communicate() + if p.returncode == 0: + common.ZipWriteStr(output_tf_zip, "META/releasekey.txt", data) + # SystemUpdateActivity uses the x509.pem version of the keys, but # put into a zipfile system/etc/security/otacerts.zip. # We DO NOT include the extra_recovery_keys (if any) here. 
From 9d44d62a6fdb2410777931dffc538c54a69c615d Mon Sep 17 00:00:00 2001 From: Dan Pasanen Date: Thu, 18 Dec 2014 11:26:37 -0600 Subject: [PATCH 143/309] releasetools: allow devices to specify a custom make_recovery_patch Change-Id: I891660c1bf919b369afd4ff496fb6bab85dffe3f --- core/Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/Makefile b/core/Makefile index b83884878..7d65bd8f3 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1687,8 +1687,13 @@ ifdef BUILD_NO $(hide) echo "build_number=$(BUILD_NO)" >> $(zip_root)/META/misc_info.txt endif $(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt) +ifeq ($(TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT),) $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ ./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root) +else + $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ + $(hide) $(TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT) $(zip_root) $(zip_root) +endif ifdef PRODUCT_DEFAULT_DEV_CERTIFICATE $(hide) build/tools/getb64key.py $(PRODUCT_DEFAULT_DEV_CERTIFICATE).x509.pem > $(zip_root)/META/releasekey.txt else From 4a9f5194e13ea66cba67293313d286f4550e12cc Mon Sep 17 00:00:00 2001 From: Pawit Pornkitprasan Date: Mon, 22 Dec 2014 20:11:47 +0700 Subject: [PATCH 144/309] build: fix dopush for SystemUI The path for SystemUI has changed in Lollipop Change-Id: Ib2126f48f6769e5f91d0aa1d44d4049bdbe1deff --- envsetup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index 8f01f479f..b8d5b9031 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -2093,7 +2093,7 @@ function dopush() esac case $TARGET in - /system/priv-app/SystemUI.apk|/system/framework/*) + /system/priv-app/SystemUI/SystemUI.apk|/system/framework/*) # Only need to stop services once if ! 
$stop_n_start; then adb shell stop From 6b631eeb4fca9794f641ec1e5f40cdd1acc60b7e Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Fri, 28 Sep 2012 11:56:02 +0530 Subject: [PATCH 145/309] envsetup: new function cmka - The build system adds a clean- target for each module / package. - This utilizes that and cleans and builds using mka. - If no argument is supplied, it runs "mka clean". - If argument is (bacon|otapackage|systemimage), it runs installclean. Change-Id: I582ac2f92df3278c41c254c73e9c02ad7f406851 (cherry picked from commit eb74c2f2b9bb708e77edfdbb8ba0d0b716fef2fc) (Also sneak in another minor change, a help entry for mkap) --- envsetup.sh | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index b8d5b9031..2b1f834b9 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -28,6 +28,8 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - aospremote: Add git remote for matching AOSP repository - cafremote: Add git remote for matching CodeAurora repository. - mka: Builds using SCHED_BATCH on all processors +- mkap: Builds the module(s) using mka and pushes them to the device. +- cmka: Cleans and builds using mka. - reposync: Parallel repo sync using ionice and SCHED_BATCH - installboot: Installs a boot.img to the connected device. - installrecovery: Installs a recovery.img to the connected device. @@ -1904,6 +1906,26 @@ function mka() { esac } +function cmka() { + if [ ! 
-z "$1" ]; then + for i in "$@"; do + case $i in + bacon|otapackage|systemimage) + mka installclean + mka $i + ;; + *) + mka clean-$i + mka $i + ;; + esac + done + else + mka clean + mka + fi +} + function reposync() { case `uname -s` in Darwin) From 2d98b0cb22bea48d1728519d5c8f33ad40ae4d09 Mon Sep 17 00:00:00 2001 From: Dan Pasanen Date: Sun, 21 Dec 2014 23:18:16 -0600 Subject: [PATCH 146/309] releasetools: clean up residual targetfiles-* dirs in /tmp/ Change-Id: I3792307663f8a85513e102c8bf3d78cf4e514760 --- tools/releasetools/common.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 3eac6bf9a..c75030451 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -480,6 +480,7 @@ def UnzipTemp(filename, pattern=None): OPTIONS.tempfiles.append(tmp) def unzip_to_dir(filename, dirname): + cmd = ["rm", "-rf", dirname + filename, "targetfiles-*"] cmd = ["unzip", "-o", "-q", filename, "-d", dirname] if pattern is not None: cmd.append(pattern) From 0cca94734db194ef9dc9515b3cc2ccb7dfb7637b Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Wed, 24 Dec 2014 21:58:01 -0800 Subject: [PATCH 147/309] build: Set QCOM global cflags for secondary arch Change-Id: I637d94f9b0ddee2c8d6057de44355e482163dd36 --- core/qcom_target.mk | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 5c5635738..521dc1c80 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -27,6 +27,12 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags) CLANG_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) + # Multiarch needs these too.. 
+ 2ND_TARGET_GLOBAL_CFLAGS += $(qcom_flags) + 2ND_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) + 2ND_CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags) + 2ND_CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags) + $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,CAMERA,camera) $(call project-set-path,qcom-display,hardware/qcom/display-caf/$(TARGET_BOARD_PLATFORM)) From eb811d63d8073a688b5be6979bde4b8774dca127 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Fri, 26 Dec 2014 00:20:55 -0800 Subject: [PATCH 148/309] build: Fix copypasta in 2nd target clang flags Change-Id: Ieedce8754b145e53d72a5d6a4b41fbe5cdf6473f --- core/qcom_target.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 521dc1c80..68f332751 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -31,7 +31,7 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) 2ND_TARGET_GLOBAL_CFLAGS += $(qcom_flags) 2ND_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) 2ND_CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags) - 2ND_CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags) + 2ND_CLANG_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,CAMERA,camera) From 21f28b5fcb91567ca15bc28e3b65147c9307a1fa Mon Sep 17 00:00:00 2001 From: Ameya Thakur Date: Mon, 29 Jul 2013 17:39:37 -0700 Subject: [PATCH 149/309] build: Add changes to release tools and mkbootimg Change the prototype of LoadRecoveryFstab to take in the device type as an argument. Fix the case where mkbootimg was being passed an incorrect argument. 
Change-Id: Ic6ac596d8d96d3a5effbdf513763ec1cb92b1a03 --- core/Makefile | 7 +++++++ tools/releasetools/common.py | 11 +++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/core/Makefile b/core/Makefile index 7d65bd8f3..76fec8953 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1587,6 +1587,9 @@ ifdef INSTALLED_2NDBOOTLOADER_TARGET $(hide) $(ACP) \ $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/RECOVERY/second endif +ifdef BOARD_KERNEL_TAGS_OFFSET + $(hide) echo "$(BOARD_KERNEL_TAGS_OFFSET)" > $(zip_root)/RECOVERY/tags_offset +endif ifdef BOARD_KERNEL_CMDLINE $(hide) echo "$(BOARD_KERNEL_CMDLINE)" > $(zip_root)/RECOVERY/cmdline endif @@ -1607,6 +1610,10 @@ ifdef INSTALLED_2NDBOOTLOADER_TARGET $(hide) $(ACP) \ $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/BOOT/second endif + +ifdef BOARD_KERNEL_TAGS_OFFSET + $(hide) echo "$(BOARD_KERNEL_TAGS_OFFSET)" > $(zip_root)/BOOT/tags_offset +endif ifdef BOARD_KERNEL_CMDLINE $(hide) echo "$(BOARD_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline endif diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index c75030451..81ad855ab 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -150,6 +150,8 @@ def read_helper(fn): if "fstab_version" not in d: d["fstab_version"] = "1" + if "device_type" not in d: + d["device_type"] = "MMC" try: data = read_helper("META/imagesizes.txt") for line in data.split("\n"): @@ -179,7 +181,7 @@ def makeint(key): makeint("boot_size") makeint("fstab_version") - d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"]) + d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"], d["device_type"]) d["build.prop"] = LoadBuildProp(read_helper) return d @@ -202,7 +204,7 @@ def LoadDictionaryFromLines(lines): d[name] = value return d -def LoadRecoveryFSTab(read_helper, fstab_version): +def LoadRecoveryFSTab(read_helper, fstab_version, type): class Partition(object): def __init__(self, mount_point, fs_type, device, length, device2, 
context): self.mount_point = mount_point @@ -366,6 +368,11 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None): cmd.append("--tags-addr") cmd.append(open(fn).read().rstrip("\n")) + fn = os.path.join(sourcedir, "tags_offset") + if os.access(fn, os.F_OK): + cmd.append("--tags_offset") + cmd.append(open(fn).read().rstrip("\n")) + fn = os.path.join(sourcedir, "ramdisk_offset") if os.access(fn, os.F_OK): cmd.append("--ramdisk_offset") From 7aadb3e0cc31bc6613ba4189b3e725075f0b97ec Mon Sep 17 00:00:00 2001 From: Clark Scheff Date: Tue, 23 Dec 2014 13:30:51 -0800 Subject: [PATCH 150/309] Add ddclient function for debugging with DDD While debugging via the CLI is great, it is nice to have the option to debug using a graphical front end. This patch adds a function for using ddd to debug native code. Simply replace gdbclient with dddclient and start debugging. Change-Id: I3d3afe08772007b11ad6e0f839868e85386340f1 Note: Requires that ddd be installed on the host PC. --- envsetup.sh | 100 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 2b1f834b9..c73a42dcc 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1214,6 +1214,106 @@ function is64bit() fi } +function dddclient() +{ + local OUT_ROOT=$(get_abs_build_var PRODUCT_OUT) + local OUT_SYMBOLS=$(get_abs_build_var TARGET_OUT_UNSTRIPPED) + local OUT_SO_SYMBOLS=$(get_abs_build_var TARGET_OUT_SHARED_LIBRARIES_UNSTRIPPED) + local OUT_VENDOR_SO_SYMBOLS=$(get_abs_build_var TARGET_OUT_VENDOR_SHARED_LIBRARIES_UNSTRIPPED) + local OUT_EXE_SYMBOLS=$(get_symbols_directory) + local PREBUILTS=$(get_abs_build_var ANDROID_PREBUILTS) + local ARCH=$(get_build_var TARGET_ARCH) + local GDB + case "$ARCH" in + arm) GDB=arm-linux-androideabi-gdb;; + arm64) GDB=arm-linux-androideabi-gdb; GDB64=aarch64-linux-android-gdb;; + mips|mips64) GDB=mips64el-linux-android-gdb;; + x86) GDB=x86_64-linux-android-gdb;; + x86_64) GDB=x86_64-linux-android-gdb;; + *) echo 
"Unknown arch $ARCH"; return 1;; + esac + + if [ "$OUT_ROOT" -a "$PREBUILTS" ]; then + local EXE="$1" + if [ "$EXE" ] ; then + EXE=$1 + if [[ $EXE =~ ^[^/].* ]] ; then + EXE="system/bin/"$EXE + fi + else + EXE="app_process" + fi + + local PORT="$2" + if [ "$PORT" ] ; then + PORT=$2 + else + PORT=":5039" + fi + + local PID="$3" + if [ "$PID" ] ; then + if [[ ! "$PID" =~ ^[0-9]+$ ]] ; then + PID=`pid $3` + if [[ ! "$PID" =~ ^[0-9]+$ ]] ; then + # that likely didn't work because of returning multiple processes + # try again, filtering by root processes (don't contain colon) + PID=`adb shell ps | \grep $3 | \grep -v ":" | awk '{print $2}'` + if [[ ! "$PID" =~ ^[0-9]+$ ]] + then + echo "Couldn't resolve '$3' to single PID" + return 1 + else + echo "" + echo "WARNING: multiple processes matching '$3' observed, using root process" + echo "" + fi + fi + fi + adb forward "tcp$PORT" "tcp$PORT" + local USE64BIT="$(is64bit $PID)" + adb shell gdbserver$USE64BIT $PORT --attach $PID & + sleep 2 + else + echo "" + echo "If you haven't done so already, do this first on the device:" + echo " gdbserver $PORT /system/bin/$EXE" + echo " or" + echo " gdbserver $PORT --attach " + echo "" + fi + + OUT_SO_SYMBOLS=$OUT_SO_SYMBOLS$USE64BIT + OUT_VENDOR_SO_SYMBOLS=$OUT_VENDOR_SO_SYMBOLS$USE64BIT + + echo >|"$OUT_ROOT/gdbclient.cmds" "set solib-absolute-prefix $OUT_SYMBOLS" + echo >>"$OUT_ROOT/gdbclient.cmds" "set solib-search-path $OUT_SO_SYMBOLS:$OUT_SO_SYMBOLS/hw:$OUT_SO_SYMBOLS/ssl/engines:$OUT_SO_SYMBOLS/drm:$OUT_SO_SYMBOLS/egl:$OUT_SO_SYMBOLS/soundfx:$OUT_VENDOR_SO_SYMBOLS:$OUT_VENDOR_SO_SYMBOLS/hw:$OUT_VENDOR_SO_SYMBOLS/egl" + echo >>"$OUT_ROOT/gdbclient.cmds" "source $ANDROID_BUILD_TOP/development/scripts/gdb/dalvik.gdb" + echo >>"$OUT_ROOT/gdbclient.cmds" "target remote $PORT" + # Enable special debugging for ART processes. 
+ if [[ $EXE =~ (^|/)(app_process|dalvikvm)(|32|64)$ ]]; then + echo >> "$OUT_ROOT/gdbclient.cmds" "art-on" + fi + echo >>"$OUT_ROOT/gdbclient.cmds" "" + + local WHICH_GDB= + # 64-bit exe found + if [ "$USE64BIT" != "" ] ; then + WHICH_GDB=$ANDROID_TOOLCHAIN/$GDB64 + # 32-bit exe / 32-bit platform + elif [ "$(get_build_var TARGET_2ND_ARCH)" = "" ]; then + WHICH_GDB=$ANDROID_TOOLCHAIN/$GDB + # 32-bit exe / 64-bit platform + else + WHICH_GDB=$ANDROID_TOOLCHAIN_2ND_ARCH/$GDB + fi + + ddd --debugger $WHICH_GDB -x "$OUT_ROOT/gdbclient.cmds" "$OUT_EXE_SYMBOLS/$EXE" + else + echo "Unable to determine build system output dir." + fi +} + case `uname -s` in Darwin) function sgrep() From ed94055722f0623c087de308a6084c16a4ee6815 Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Fri, 21 Nov 2014 11:51:03 -0800 Subject: [PATCH 151/309] build: Allow devices to specify a RIL variant Change-Id: Ia5d35d916be358fb7be7ead6bd05cc3628b35a37 --- core/qcom_target.mk | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 68f332751..1e2066a3b 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -4,6 +4,9 @@ define qcom-set-path-variant $(call project-set-path-variant,qcom-$(2),TARGET_QCOM_$(1)_VARIANT,hardware/qcom/$(2)) endef +define ril-set-path-variant +$(call project-set-path-variant,ril,TARGET_RIL_VARIANT,hardware/$(1)) +endef ifeq ($(BOARD_USES_QCOM_HARDWARE),true) @@ -39,6 +42,7 @@ $(call project-set-path,qcom-display,hardware/qcom/display-caf/$(TARGET_BOARD_PL $(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,SENSORS,sensors) +$(call ril-set-path-variant,ril) else $(call project-set-path,qcom-audio,hardware/qcom/audio/default) $(call qcom-set-path-variant,CAMERA,camera) @@ -46,4 +50,5 @@ $(call project-set-path,qcom-display,hardware/qcom/display/$(TARGET_BOARD_PLATFO $(call qcom-set-path-variant,GPS,gps) $(call 
project-set-path,qcom-media,hardware/qcom/media/default) $(call qcom-set-path-variant,SENSORS,sensors) +$(call ril-set-path-variant,ril) endif From a9bb776b489fce05358cde3993014df393b18861 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Sun, 28 Dec 2014 02:53:03 -0800 Subject: [PATCH 152/309] core: Get the RIL variant path into the global includes * Need to have this in the global path, so shuffle things around a little. Change-Id: I2dd2b378d17ff3fa0e379793df31d68964d87278 --- core/config.mk | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/core/config.mk b/core/config.mk index 2db541b73..b95e974cd 100644 --- a/core/config.mk +++ b/core/config.mk @@ -41,7 +41,6 @@ SRC_HEADERS := \ $(TOPDIR)system/media/audio/include \ $(TOPDIR)hardware/libhardware/include \ $(TOPDIR)hardware/libhardware_legacy/include \ - $(TOPDIR)hardware/ril/include \ $(TOPDIR)libnativehelper/include \ $(TOPDIR)frameworks/native/include \ $(TOPDIR)frameworks/native/opengl/include \ @@ -541,10 +540,12 @@ else DEFAULT_SYSTEM_DEV_CERTIFICATE := build/target/product/security/testkey endif +# Rules for QCOM targets +include $(BUILD_SYSTEM)/qcom_target.mk + # ############################################################### # Set up final options. 
# ############################################################### - HOST_GLOBAL_CFLAGS += $(COMMON_GLOBAL_CFLAGS) HOST_RELEASE_CFLAGS += $(COMMON_RELEASE_CFLAGS) @@ -561,7 +562,8 @@ HOST_GLOBAL_LD_DIRS += -L$(HOST_OUT_INTERMEDIATE_LIBRARIES) TARGET_GLOBAL_LD_DIRS += -L$(TARGET_OUT_INTERMEDIATE_LIBRARIES) HOST_PROJECT_INCLUDES:= $(SRC_HEADERS) $(SRC_HOST_HEADERS) $(HOST_OUT_HEADERS) -TARGET_PROJECT_INCLUDES:= $(SRC_HEADERS) $(TARGET_OUT_HEADERS) \ +TARGET_PROJECT_INCLUDES:= $(SRC_HEADERS) $(TOPDIR)$(call project-path-for,ril)/include \ + $(TARGET_OUT_HEADERS) \ $(TARGET_DEVICE_KERNEL_HEADERS) $(TARGET_BOARD_KERNEL_HEADERS) \ $(TARGET_PRODUCT_KERNEL_HEADERS) @@ -692,9 +694,6 @@ endif RSCOMPAT_32BIT_ONLY_API_LEVELS := 8 9 10 11 12 13 14 15 16 17 18 19 20 RSCOMPAT_NO_USAGEIO_API_LEVELS := 8 9 10 11 12 13 -# Rules for QCOM targets -include $(BUILD_SYSTEM)/qcom_target.mk - # We might want to skip items listed in PRODUCT_COPY_FILES based on # various target flags. This is useful for replacing a binary module with one # built from source. 
This should be a list of destination files under $OUT From 0930d582f63125cfa414eb5d94433b25950274d6 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Fri, 2 Jan 2015 01:55:49 +0000 Subject: [PATCH 153/309] build: qcom: Set the correct project path for device-specific cameras Change-Id: I37696dae2c05c4e9f4fc824fbba63af47483ea17 --- core/qcom_target.mk | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 1e2066a3b..825ddbb1b 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -37,7 +37,11 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) 2ND_CLANG_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) +ifeq ($(USE_DEVICE_SPECIFIC_CAMERA),true) +$(call project-set-path,qcom-camera,$(TARGET_DEVICE_DIR)/camera) +else $(call qcom-set-path-variant,CAMERA,camera) +endif $(call project-set-path,qcom-display,hardware/qcom/display-caf/$(TARGET_BOARD_PLATFORM)) $(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media-caf/$(TARGET_BOARD_PLATFORM)) From 530a4581789a9866b15ca4a4c083289ff7b86dc6 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Mon, 5 Jan 2015 02:53:51 +0000 Subject: [PATCH 154/309] build: Add an option to block non-enforcing builds Prevent accidental build of test configurations. Hopping back and forth between states (especially disabled) leaves a mess in the filesystems, and we want to try to ensure published builds go out enforcing. 
Change-Id: I2206975968de421dec842f49b02490fa85ca9f3b --- core/Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/Makefile b/core/Makefile index 76fec8953..e0feaba60 100644 --- a/core/Makefile +++ b/core/Makefile @@ -883,6 +883,11 @@ INTERNAL_RECOVERYIMAGE_ARGS := \ # Assumes this has already been stripped ifdef BOARD_KERNEL_CMDLINE + ifdef BUILD_ENFORCE_SELINUX + ifneq (,$(filter androidboot.selinux=permissive androidboot.selinux=disabled, $(BOARD_KERNEL_CMDLINE))) + $(error "Trying to apply non-default selinux settings. Aborting") + endif + endif INTERNAL_RECOVERYIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)" endif ifdef BOARD_KERNEL_BASE From 8c93225b36bd85ab214b1d2c4755d508f0126775 Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Thu, 8 Jan 2015 14:36:45 -0800 Subject: [PATCH 155/309] releasetools: Store the build.prop file in the OTA zip This file is often used to read information about the update contained in the OTA. Place it in the update so it can be used by scripts. The file is not added to the updater-script, so it will not be placed onto the actual system. 
Change-Id: I88044796cbe8f199ca02df2840fd944cba2c73fa --- tools/releasetools/ota_from_target_files | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 6c5a8fbda..5f8f7d1d2 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -742,6 +742,9 @@ endif; script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary) WriteMetadata(metadata, output_zip) + common.ZipWriteStr(output_zip, "system/build.prop", + ""+input_zip.read("SYSTEM/build.prop")) + common.ZipWriteStr(output_zip, "META-INF/org/cyanogenmod/releasekey", ""+input_zip.read("META/releasekey.txt")) From 6b65ba8505da8884a4aaaef9e85600e6c741999d Mon Sep 17 00:00:00 2001 From: Chris Sarbora Date: Wed, 17 Dec 2014 14:41:04 -0800 Subject: [PATCH 156/309] Allow finer control over how product variables are inherited. Change-Id: I3abc22eea94293d1d0ebf0a81b396ebea0baf5a8 (cherry picked from commit 29357f5ea1dd8507f70efc330b2e5966d13504e8) --- core/product.mk | 63 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/core/product.mk b/core/product.mk index 733b812e8..427fd83eb 100644 --- a/core/product.mk +++ b/core/product.mk @@ -127,24 +127,66 @@ define dump-products $(foreach p,$(PRODUCTS),$(call dump-product,$(p))) endef +# +# Internal function. Appends inherited product variables to an existing one. +# +# $(1): Product variable to operate on +# $(2): Value to append +# +define inherit-product_append-var + $(eval $(1) := $($(1)) $(INHERIT_TAG)$(strip $(2))) +endef + +# +# Internal function. Prepends inherited product variables to an existing one. +# +# $(1): Product variable to operate on +# $(2): Value to prepend +# +define inherit-product_prepend-var + $(eval $(1) := $(INHERIT_TAG)$(strip $(2)) $($(1))) +endef + +# +# Internal function. Tracks visited notes during inheritance resolution. 
+# +# $(1): Product being inherited +# +define inherit-product_track-node + $(eval inherit_var := \ + PRODUCTS.$(strip $(word 1,$(_include_stack))).INHERITS_FROM) \ + $(eval $(inherit_var) := $(sort $($(inherit_var)) $(strip $(1)))) \ + $(eval inherit_var:=) \ + $(eval ALL_PRODUCTS := $(sort $(ALL_PRODUCTS) $(word 1,$(_include_stack)))) +endef + # # $(1): product to inherit # # Does three things: -# 1. Inherits all of the variables from $1. +# 1. Inherits all of the variables from $1, prioritizing existing settings. # 2. Records the inheritance in the .INHERITS_FROM variable # 3. Records that we've visited this node, in ALL_PRODUCTS # define inherit-product $(foreach v,$(_product_var_list), \ - $(eval $(v) := $($(v)) $(INHERIT_TAG)$(strip $(1)))) \ - $(eval inherit_var := \ - PRODUCTS.$(strip $(word 1,$(_include_stack))).INHERITS_FROM) \ - $(eval $(inherit_var) := $(sort $($(inherit_var)) $(strip $(1)))) \ - $(eval inherit_var:=) \ - $(eval ALL_PRODUCTS := $(sort $(ALL_PRODUCTS) $(word 1,$(_include_stack)))) + $(call inherit-product_append-var,$(v),$(1))) \ + $(call inherit-product_track-node,$(1)) endef +# +# $(1): product to inherit +# +# Does three things: +# 1. Inherits all of the variables from $1, prioritizing inherited settings. +# 2. Records the inheritance in the .INHERITS_FROM variable +# 3. 
Records that we've visited this node, in ALL_PRODUCTS +# +define prepend-product + $(foreach v,$(_product_var_list), \ + $(call inherit-product_prepend-var,$(v),$(1))) \ + $(call inherit-product_track-node,$(1)) +endef # # Do inherit-product only if $(1) exists @@ -153,6 +195,13 @@ define inherit-product-if-exists $(if $(wildcard $(1)),$(call inherit-product,$(1)),) endef +# +# Do inherit-product-prepend only if $(1) exists +# +define prepend-product-if-exists + $(if $(wildcard $(1)),$(call prepend-product,$(1)),) +endef + # # $(1): product makefile list # From ac9d26b70e78ba870e30fdf8f9a5e372ffa0b010 Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Fri, 12 Dec 2014 11:51:33 -0800 Subject: [PATCH 157/309] build: ota: Support for install tools in /tmp/install * Anything in OUT/install gets packaged up into the zip and extracted to /tmp/install immediately after FullOTA_InstallBegin. * Use /tmp/install in edify scripts and remove code related to using and manipulating /system for install tools. 
Change-Id: I315a3238e36c8d15e26f935e272f7e27dd59c320 Conflicts: tools/releasetools/edify_generator.py --- tools/releasetools/edify_generator.py | 22 +++-------------- tools/releasetools/ota_from_target_files | 30 ++++++++++++------------ 2 files changed, 18 insertions(+), 34 deletions(-) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index a335a2e1d..1a3f09cfb 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -148,27 +148,11 @@ def AssertSomeBaseband(self, *basebands): self.script.append(self._WordWrap(cmd)) def RunBackup(self, command): - self.script.append('package_extract_file("system/bin/backuptool.sh", "/tmp/backuptool.sh");') - self.script.append('package_extract_file("system/bin/backuptool.functions", "/tmp/backuptool.functions");') - if not self.info.get("use_set_metadata", False): - self.script.append('set_perm(0, 0, 0755, "/tmp/backuptool.sh");') - self.script.append('set_perm(0, 0, 0644, "/tmp/backuptool.functions");') - else: - self.script.append('set_metadata("/tmp/backuptool.sh", "uid", 0, "gid", 0, "mode", 0755);') - self.script.append('set_metadata("/tmp/backuptool.functions", "uid", 0, "gid", 0, "mode", 0644);') - self.script.append(('run_program("/tmp/backuptool.sh", "%s");' % command)) - if command == "restore": - self.script.append('delete("/system/bin/backuptool.sh");') - self.script.append('delete("/system/bin/backuptool.functions");') + self.script.append(('run_program("/tmp/install/bin/backuptool.sh", "%s");' % command)) def ValidateSignatures(self, command): - if command == "cleanup": - self.script.append('delete("/system/bin/otasigcheck.sh");') - else: - self.script.append('package_extract_file("system/bin/otasigcheck.sh", "/tmp/otasigcheck.sh");') - self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");') - self.script.append('set_metadata("/tmp/otasigcheck.sh", "uid", 0, "gid", 0, "mode", 0755);') - 
self.script.append('run_program("/tmp/otasigcheck.sh") == "0" || abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");') + # Exit code 124 == abort. run_program returns raw, so left-shift 8bit + self.script.append('run_program("/tmp/install/bin/otasigcheck.sh") != "31744" || abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");') def ShowProgress(self, frac, dur): """Update the progress bar, advancing it over 'frac' over the next diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 5f8f7d1d2..af4a87c09 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -506,6 +506,16 @@ def GetImage(which, tmpdir, info_dict): return sparse_img.SparseImage(path, mappath, clobbered_blocks) +def CopyInstallTools(output_zip): + oldcwd = os.getcwd() + os.chdir(os.getenv('OUT')) + for root, subdirs, files in os.walk("install"): + for f in files: + p = os.path.join(root, f) + output_zip.write(p, p) + os.chdir(oldcwd) + + def WriteFullOTAPackage(input_zip, output_zip): # TODO: how to determine this? We don't know what version it will # be installed on top of. 
For now, we expect the API just won't @@ -603,12 +613,12 @@ else if get_stage("%(bcb_dev)s") == "3/3" then script.AppendExtra("ifelse(is_mounted(\"/system\"), unmount(\"/system\"));") device_specific.FullOTA_InstallBegin() + CopyInstallTools(output_zip) + script.UnpackPackageDir("install", "/tmp/install") + script.SetPermissionsRecursive("/tmp/install", 0, 0, 0755, 0644, None, None) + script.SetPermissionsRecursive("/tmp/install/bin", 0, 0, 0755, 0755, None, None) + if OPTIONS.backuptool: - if block_based: - common.ZipWriteStr(output_zip, "system/bin/backuptool.sh", - ""+input_zip.read("SYSTEM/bin/backuptool.sh")) - common.ZipWriteStr(output_zip, "system/bin/backuptool.functions", - ""+input_zip.read("SYSTEM/bin/backuptool.functions")) script.Mount("/system") script.RunBackup("backup") script.Unmount("/system") @@ -620,10 +630,6 @@ else if get_stage("%(bcb_dev)s") == "3/3" then if HasVendorPartition(input_zip): system_progress -= 0.1 - if block_based: - common.ZipWriteStr(output_zip, "system/bin/otasigcheck.sh", - ""+input_zip.read("SYSTEM/bin/otasigcheck.sh")) - script.AppendExtra("if is_mounted(\"/data\") then") script.ValidateSignatures("data") script.AppendExtra("else") @@ -696,12 +702,6 @@ else if get_stage("%(bcb_dev)s") == "3/3" then common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict) common.ZipWriteStr(output_zip, "boot.img", boot_img.data) - if block_based: - script.Mount("/system") - script.ValidateSignatures("cleanup") - if block_based: - script.Unmount("/system") - device_specific.FullOTA_PostValidate() if OPTIONS.backuptool: From 7f9875d8691af853d9edcfe6b3093adff90b554a Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Tue, 16 Dec 2014 09:58:47 -0800 Subject: [PATCH 158/309] build: Only set capabilities if provided * Allows OUT/install to be compatibile with K recoveries. 
Change-Id: I308ee5759da1e3c8e749e9408793f0720fb83cee --- tools/releasetools/edify_generator.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 1a3f09cfb..740eec425 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -339,10 +339,10 @@ def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities): if not self.info.get("use_set_metadata", False): self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn)) else: - if capabilities is None: - capabilities = "0x0" - cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o, ' \ - '"capabilities", %s' % (fn, uid, gid, mode, capabilities) + cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o' \ + % (fn, uid, gid, mode) + if capabilities is not None: + cmd += ', "capabilities", %s' % ( capabilities ) if selabel is not None: cmd += ', "selabel", "%s"' % selabel cmd += ');' @@ -355,11 +355,11 @@ def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel, self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");' % (uid, gid, dmode, fmode, fn)) else: - if capabilities is None: - capabilities = "0x0" cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \ - '"dmode", 0%o, "fmode", 0%o, "capabilities", %s' \ - % (fn, uid, gid, dmode, fmode, capabilities) + '"dmode", 0%o, "fmode", 0%o' \ + % (fn, uid, gid, dmode, fmode) + if capabilities is not None: + cmd += ', "capabilities", "%s"' % ( capabilities ) if selabel is not None: cmd += ', "selabel", "%s"' % selabel cmd += ');' From 674cb145c1c4172a7123621fb6b188c181c2eac4 Mon Sep 17 00:00:00 2001 From: Roman Birg Date: Wed, 14 Jan 2015 11:33:46 -0800 Subject: [PATCH 159/309] build: fix darwin compile BSD cp command is different from GNU cp command. GNU ignores the slashes at the end, while the slash alters the behavior of the BSD version. 
Change-Id: I8e2e8d98df6819d9c3451b0d96e3269131a4fb4e Signed-off-by: Roman Birg --- core/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/core/Makefile b/core/Makefile index e0feaba60..e611fb29e 100644 --- a/core/Makefile +++ b/core/Makefile @@ -939,7 +939,6 @@ define build-recoveryimage-target $(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/* $(hide) cp -rf $(recovery_resources_common)/* $(TARGET_RECOVERY_ROOT_OUT)/res $(hide) cp -f $(recovery_font) $(TARGET_RECOVERY_ROOT_OUT)/res/images/font.png - $(hide) $(foreach item,$(recovery_root_private), \ cp -rf $(item) $(TARGET_RECOVERY_OUT)/) $(hide) $(foreach item,$(recovery_resources_private), \ cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/) From 521aab6207d44675d79948195cfc14c3fffab962 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Wed, 24 Dec 2014 03:18:54 -0800 Subject: [PATCH 160/309] core: More flexible kernel configuration * Allow various combinations of kernel vs. userspace architectures * Get rid of various assumptions about everything being ARM or 32-bit * This adds the following target flags: TARGET_KERNEL_ARCH TARGET_KERNEL_HEADER_ARCH KERNEL_HEADER_DEFCONFIG TARGET_KERNEL_CROSS_COMPILE_PREFIX Change-Id: If0bc202abd35c216ba3ea5707ffdb602526d8ed3 --- core/tasks/kernel.mk | 130 ++++++++++++++++++++++++++++++++----------- 1 file changed, 96 insertions(+), 34 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index e6be63a41..9f40eec3a 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -29,14 +29,50 @@ SELINUX_DEFCONFIG := $(TARGET_KERNEL_SELINUX_CONFIG) KERNEL_OUT := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ KERNEL_CONFIG := $(KERNEL_OUT)/.config +TARGET_KERNEL_ARCH := $(strip $(TARGET_KERNEL_ARCH)) +ifeq ($(TARGET_KERNEL_ARCH),) +KERNEL_ARCH := $(TARGET_ARCH) +else +KERNEL_ARCH := $(TARGET_KERNEL_ARCH) +endif + +TARGET_KERNEL_HEADER_ARCH := $(strip $(TARGET_KERNEL_HEADER_ARCH)) +ifeq ($(TARGET_KERNEL_HEADER_ARCH),) +KERNEL_HEADER_ARCH := $(KERNEL_ARCH) +else 
+KERNEL_HEADER_ARCH := $(TARGET_KERNEL_HEADER_ARCH) +endif + +KERNEL_HEADER_DEFCONFIG := $(strip $(KERNEL_HEADER_DEFCONFIG)) +ifeq ($(KERNEL_HEADER_DEFCONFIG),) +KERNEL_HEADER_DEFCONFIG := $(KERNEL_DEFCONFIG) +endif + + ifneq ($(BOARD_KERNEL_IMAGE_NAME),) - TARGET_PREBUILT_INT_KERNEL_TYPE := $(BOARD_KERNEL_IMAGE_NAME) - TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(TARGET_ARCH)/boot/$(TARGET_PREBUILT_INT_KERNEL_TYPE) + TARGET_PREBUILT_INT_KERNEL_TYPE := $(BOARD_KERNEL_IMAGE_NAME) else - TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(TARGET_ARCH)/boot/zImage - TARGET_PREBUILT_INT_KERNEL_TYPE := zImage + ifeq ($(TARGET_USES_UNCOMPRESSED_KERNEL),true) + TARGET_PREBUILT_INT_KERNEL_TYPE := Image + else + TARGET_PREBUILT_INT_KERNEL_TYPE := zImage + endif +endif + +TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/$(TARGET_PREBUILT_INT_KERNEL_TYPE) + +# Clear this first to prevent accidental poisoning from env +MAKE_FLAGS := + +ifeq ($(KERNEL_ARCH),arm64) + # Avoid "unsupported RELA relocation: 311" errors (R_AARCH64_ADR_GOT_PAGE) + MAKE_FLAGS += CFLAGS_MODULE="-fno-pic" + ifeq ($(TARGET_ARCH),arm) + KERNEL_CONFIG_OVERRIDE := CONFIG_ANDROID_BINDER_IPC_32BIT=y + endif endif + ## Do be discontinued in a future version. 
Notify builder about target ## kernel format requirement ifeq ($(BOARD_KERNEL_IMAGE_NAME),) @@ -97,12 +133,7 @@ else else #$(info Kernel source found, building it) FULL_KERNEL_BUILD := true - ifeq ($(TARGET_USES_UNCOMPRESSED_KERNEL),true) - $(info Using uncompressed kernel) - KERNEL_BIN := $(KERNEL_OUT)/piggy - else - KERNEL_BIN := $(TARGET_PREBUILT_INT_KERNEL) - endif + KERNEL_BIN := $(TARGET_PREBUILT_INT_KERNEL) endif endif @@ -112,19 +143,38 @@ KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr KERNEL_MODULES_INSTALL := system KERNEL_MODULES_OUT := $(TARGET_OUT)/lib/modules -ifeq ($(KERNEL_TOOLCHAIN),) -KERNEL_TOOLCHAIN := $(ARM_EABI_TOOLCHAIN) -endif +TARGET_KERNEL_CROSS_COMPILE_PREFIX := $(strip $(TARGET_KERNEL_CROSS_COMPILE_PREFIX)) +ifeq ($(TARGET_KERNEL_CROSS_COMPILE_PREFIX),) ifeq ($(KERNEL_TOOLCHAIN_PREFIX),) KERNEL_TOOLCHAIN_PREFIX := arm-eabi- endif +else +KERNEL_TOOLCHAIN_PREFIX := $(TARGET_KERNEL_CROSS_COMPILE_PREFIX) +endif + +ifeq ($(KERNEL_TOOLCHAIN),) +KERNEL_TOOLCHAIN_PATH := $(KERNEL_TOOLCHAIN_PREFIX) +else +ifneq ($(KERNEL_TOOLCHAIN_PREFIX),) +KERNEL_TOOLCHAIN_PATH := $(KERNEL_TOOLCHAIN)/$(KERNEL_TOOLCHAIN_PREFIX) +endif +endif + +ifneq ($(USE_CCACHE),) + ccache := $(ANDROID_BUILD_TOP)/prebuilts/misc/$(HOST_PREBUILT_TAG)/ccache/ccache + # Check that the executable is here. 
+ ccache := $(strip $(wildcard $(ccache))) +endif + +KERNEL_CROSS_COMPILE := CROSS_COMPILE="$(ccache) $(KERNEL_TOOLCHAIN_PATH)" +ccache = define mv-modules mdpath=`find $(KERNEL_MODULES_OUT) -type f -name modules.order`;\ if [ "$$mdpath" != "" ];then\ mpath=`dirname $$mdpath`;\ ko=`find $$mpath/kernel -type f -name *.ko`;\ - for i in $$ko; do $(KERNEL_TOOLCHAIN)/$(KERNEL_TOOLCHAIN_PREFIX)strip --strip-unneeded $$i;\ + for i in $$ko; do $(KERNEL_TOOLCHAIN_PATH)strip --strip-unneeded $$i;\ mv $$i $(KERNEL_MODULES_OUT)/; done;\ fi endef @@ -136,18 +186,8 @@ define clean-module-folder fi endef -ifeq ($(TARGET_ARCH),arm) - ifneq ($(USE_CCACHE),) - ccache := $(ANDROID_BUILD_TOP)/prebuilts/misc/$(HOST_PREBUILT_TAG)/ccache/ccache - # Check that the executable is here. - ccache := $(strip $(wildcard $(ccache))) - endif - ARM_CROSS_COMPILE:=CROSS_COMPILE="$(ccache) $(KERNEL_TOOLCHAIN)/$(KERNEL_TOOLCHAIN_PREFIX)" - ccache = -endif - ifeq ($(HOST_OS),darwin) - MAKE_FLAGS := C_INCLUDE_PATH=$(ANDROID_BUILD_TOP)/external/elfutils/0.153/libelf/ + MAKE_FLAGS += C_INCLUDE_PATH=$(ANDROID_BUILD_TOP)/external/elfutils/0.153/libelf/ endif ifeq ($(TARGET_KERNEL_MODULES),) @@ -159,16 +199,17 @@ $(KERNEL_OUT): mkdir -p $(KERNEL_MODULES_OUT) $(KERNEL_CONFIG): $(KERNEL_OUT) - $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(TARGET_ARCH) $(ARM_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG) - -$(KERNEL_OUT)/piggy : $(TARGET_PREBUILT_INT_KERNEL) - $(hide) gunzip -c $(KERNEL_OUT)/arch/$(TARGET_ARCH)/boot/compressed/piggy.gzip > $(KERNEL_OUT)/piggy + $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG) + $(hide) if [ ! 
-z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \ + echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ + echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi TARGET_KERNEL_BINARIES: $(KERNEL_OUT) $(KERNEL_CONFIG) $(KERNEL_HEADERS_INSTALL) - $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(TARGET_ARCH) $(ARM_CROSS_COMPILE) $(TARGET_PREBUILT_INT_KERNEL_TYPE) - -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(TARGET_ARCH) $(ARM_CROSS_COMPILE) dtbs - -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(TARGET_ARCH) $(ARM_CROSS_COMPILE) modules - -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) INSTALL_MOD_PATH=../../$(KERNEL_MODULES_INSTALL) ARCH=$(TARGET_ARCH) $(ARM_CROSS_COMPILE) modules_install + $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(TARGET_PREBUILT_INT_KERNEL_TYPE) + -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) dtbs + -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) modules + -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) INSTALL_MOD_PATH=../../$(KERNEL_MODULES_INSTALL) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) modules_install $(mv-modules) $(clean-module-folder) @@ -179,7 +220,28 @@ $(TARGET_PREBUILT_INT_KERNEL): $(TARGET_KERNEL_MODULES) $(clean-module-folder) $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) - $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(TARGET_ARCH) $(ARM_CROSS_COMPILE) headers_install + $(hide) if [ ! 
-z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \ + $(hide) rm -f ../$(KERNEL_CONFIG); \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) $(KERNEL_HEADER_DEFCONFIG); \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) headers_install; fi + $(hide) if [ "$(KERNEL_HEADER_DEFCONFIG)" != "$(KERNEL_DEFCONFIG)" ]; then \ + echo "Used a different defconfig for header generation"; \ + $(hide) rm -f ../$(KERNEL_CONFIG); \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG); fi + $(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \ + echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ + echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi + +kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG) + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) tags + +kernelconfig: $(KERNEL_OUT) $(KERNEL_CONFIG) + env KCONFIG_NOTIMESTAMP=true \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) menuconfig + env KCONFIG_NOTIMESTAMP=true \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) savedefconfig + cp $(KERNEL_OUT)/defconfig kernel/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG) endif # FULL_KERNEL_BUILD From df31e69019e35b0d3428acc1ff19017022a48ee9 Mon Sep 17 00:00:00 2001 From: Brian Chu Date: Mon, 5 Jan 2015 23:03:30 -0500 Subject: [PATCH 161/309] Add support for OSX up to 10.10 and CLI Tools - Support using OSX versions 10.9 and 10.10 as the compilation host. Please note that the prebuilt darwin-x86 gcc is missing an upstream patch that properly identifies 10.10 as newer than 10.2, which restricts mac_sdk_versions_supported to 10.9. 
- Support using Xcode Command Line Tools, waiving the requirement for installing the full Xcode development suite for 600k worth of C++ headers. These build modifications have been verified by successfully compiling a cm12-hammerhead image from scratch on a OSX Yosemite 10.10.1 host with Xcode Command Line Tools 6.1.1 installed. Change-Id: Id1741ef583d186eb6a42093e5111431ae7b08a2c --- core/combo/mac_version.mk | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/core/combo/mac_version.mk b/core/combo/mac_version.mk index 6defba79e..d18e4c590 100644 --- a/core/combo/mac_version.mk +++ b/core/combo/mac_version.mk @@ -9,17 +9,19 @@ ifndef build_mac_version build_mac_version := $(shell sw_vers -productVersion) -mac_sdk_versions_supported := 10.6 10.7 10.8 10.9 +# Caution: Do not add 10.10 to this list until the prebuilts/darwin-x86 toolchains are updated. +# In the meantime, setting mac_sdk_version to 10.9 works on Yosemite (verified on 10.10.1). +mac_sdk_versions_supported := 10.6 10.7 10.8 10.9 ifneq ($(strip $(MAC_SDK_VERSION)),) mac_sdk_version := $(MAC_SDK_VERSION) ifeq ($(filter $(mac_sdk_version),$(mac_sdk_versions_supported)),) $(warning ****************************************************************) -$(warning * MAC_SDK_VERSION $(MAC_SDK_VERSION) isn't one of the supported $(mac_sdk_versions_supported)) +$(warning * MAC_SDK_VERSION $(MAC_SDK_VERSION) isn\'t one of the supported $(mac_sdk_versions_supported)) $(warning ****************************************************************) $(error Stop.) 
endif else -mac_sdk_versions_installed := $(shell xcodebuild -showsdks | grep macosx | sort | sed -e "s/.*macosx//g") +mac_sdk_versions_installed := $(shell xcodebuild -showsdks 2> /dev/null | grep macosx | sort | sed -e "s/.*macosx//g") mac_sdk_version := $(firstword $(filter $(mac_sdk_versions_installed), $(mac_sdk_versions_supported))) ifeq ($(mac_sdk_version),) mac_sdk_version := $(firstword $(mac_sdk_versions_supported)) @@ -27,6 +29,18 @@ endif endif mac_sdk_path := $(shell xcode-select -print-path) + +ifeq ($(strip "$(mac_sdk_path)"), "/Library/Developer/CommandLineTools") +# Accept any modern version of Apple Command Line Tools +mac_sdk_root := / + +# Override mac_sdk_version with build_mac_version (aka the version of the OSX host), but assume the latest +# supported mac_sdk_version if the build_mac_version is not recognized. +mac_sdk_version := $(shell echo $(build_mac_version) | cut -d '.' -f 1,2) +ifeq ($(filter $(mac_sdk_version),$(mac_sdk_versions_supported)),) +mac_sdk_version := $(lastword $(mac_sdk_versions_supported)) +endif +else # try /Applications/Xcode*.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.?.sdk # or /Volume/Xcode/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.?.sdk mac_sdk_root := $(mac_sdk_path)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX$(mac_sdk_version).sdk @@ -40,6 +54,7 @@ $(warning * Can not find SDK $(mac_sdk_version) at $(mac_sdk_root)) $(warning *****************************************************) $(error Stop.) 
endif +endif # $(mac_sdk_path) ifeq ($(mac_sdk_version),10.6) gcc_darwin_version := 10 From 0d2a774090a9f630d8f99824fda46c7ee420c32c Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Sun, 18 Jan 2015 19:34:14 -0500 Subject: [PATCH 162/309] Fix kernel.mk if SELINUX or VARIANT_DEFCONFIG is defined KERNEL_HEADER_DEFCONFIG was ignoring SELINUX_DEFCONFIG thus breaking all selinux support Change-Id: Idc3367d6b4b85343078e63e87dca6d6d052e7f53 --- core/tasks/kernel.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 9f40eec3a..a2eb21e19 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -222,12 +222,12 @@ $(TARGET_PREBUILT_INT_KERNEL): $(TARGET_KERNEL_MODULES) $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) $(hide) if [ ! -z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \ $(hide) rm -f ../$(KERNEL_CONFIG); \ - $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) $(KERNEL_HEADER_DEFCONFIG); \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_HEADER_DEFCONFIG); \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) headers_install; fi $(hide) if [ "$(KERNEL_HEADER_DEFCONFIG)" != "$(KERNEL_DEFCONFIG)" ]; then \ echo "Used a different defconfig for header generation"; \ $(hide) rm -f ../$(KERNEL_CONFIG); \ - $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG); fi + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG); fi $(hide) if [ ! 
-z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \ From 114137afb8c4c9a80cac41e57f779f392f991583 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Tue, 20 Jan 2015 01:41:19 +0000 Subject: [PATCH 163/309] build: kernel: Fix stray "@" Nested "$(hide)"s (makefile @) don't work. There was one at the beginning of the script, remove the inner one to prevent it from being interpreted as a shell command Change-Id: I08a829ee56b212366a0f5a3812f7a614667a9c2b --- core/tasks/kernel.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index a2eb21e19..2c4927120 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -221,12 +221,12 @@ $(TARGET_PREBUILT_INT_KERNEL): $(TARGET_KERNEL_MODULES) $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) $(hide) if [ ! -z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \ - $(hide) rm -f ../$(KERNEL_CONFIG); \ + rm -f ../$(KERNEL_CONFIG); \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_HEADER_DEFCONFIG); \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) headers_install; fi $(hide) if [ "$(KERNEL_HEADER_DEFCONFIG)" != "$(KERNEL_DEFCONFIG)" ]; then \ echo "Used a different defconfig for header generation"; \ - $(hide) rm -f ../$(KERNEL_CONFIG); \ + rm -f ../$(KERNEL_CONFIG); \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG); fi $(hide) if [ ! 
-z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ From 86a2bbb51bfc9e8b3d55954408476f947ab2404a Mon Sep 17 00:00:00 2001 From: Ameya Thakur Date: Tue, 18 Nov 2014 15:27:07 -0800 Subject: [PATCH 164/309] build: releasetools: ota: Add support for 32-64 bit upgrades We now use the new API to determine if the update package is compatible with the device it is currently being applied on or not. We also use the 32 bit updater libraries while compiling the ota package Change-Id: I4d1d2bd90e03e6f4b2f786d25c6d02f62243c3f3 --- core/Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/Makefile b/core/Makefile index e611fb29e..5b1cfe32c 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1523,11 +1523,10 @@ built_ota_tools := \ $(call intermediates-dir-for,EXECUTABLES,sqlite3,,,$(TARGET_PREFER_32_BIT))/sqlite3 \ ifeq ($(TARGET_ARCH),arm64) -built_ota_tools += $(call intermediates-dir-for,EXECUTABLES,updater,,,32)/updater + built_ota_tools += $(call intermediates-dir-for,EXECUTABLES,updater,,,32)/updater else -built_ota_tools += $(call intermediates-dir-for,EXECUTABLES,updater)/updater + built_ota_tools += $(call intermediates-dir-for,EXECUTABLES,updater)/updater endif - $(BUILT_TARGET_FILES_PACKAGE): PRIVATE_OTA_TOOLS := $(built_ota_tools) $(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION) From a3280a22077e90b5231f15396bff1fa95246fead Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Fri, 23 Jan 2015 17:16:30 -0800 Subject: [PATCH 165/309] releasetools: Add radio images to fastboot packages Fastboot packages should include radio images, if they exist. Also generate a flash-radio.sh file if a filesmap file exists to indicate the partition that a radio image should be flashed to. 
Change-Id: I8f603b1509a5c1b55fee1cb6f6a49a7efb8cc594 --- tools/releasetools/img_from_target_files.py | 26 +++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py index c48699200..e896ba3f6 100755 --- a/tools/releasetools/img_from_target_files.py +++ b/tools/releasetools/img_from_target_files.py @@ -47,6 +47,31 @@ def CopyInfo(output_zip): output_zip, os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt"), "android-info.txt") +def AddRadio(output_zip): + """If they exist, add RADIO files to the output.""" + if os.path.isdir(os.path.join(OPTIONS.input_tmp, "RADIO")): + for radio_root, radio_dirs, radio_files in os.walk(os.path.join(OPTIONS.input_tmp, "RADIO")): + for radio_file in radio_files: + output_zip.write(os.path.join(radio_root, radio_file), radio_file) + + # If a filesmap file exists, create a script to flash the radio images based on it + filesmap = os.path.join(OPTIONS.input_tmp, "RADIO/filesmap") + if os.path.isfile(filesmap): + print "creating flash-radio.sh..." + filesmap_data = open(filesmap, "r") + filesmap_regex = re.compile(r'^(\S+)\s\S+\/by-name\/(\S+).*') + tmp_flash_radio = tempfile.NamedTemporaryFile() + tmp_flash_radio.write("#!/bin/sh\n\n") + for filesmap_line in filesmap_data: + filesmap_entry = filesmap_regex.search(filesmap_line) + if filesmap_entry: + tmp_flash_radio.write("fastboot flash %s %s\n" % (filesmap_entry.group(2), filesmap_entry.group(1))) + tmp_flash_radio.flush() + if os.path.getsize(tmp_flash_radio.name) > 0: + output_zip.write(tmp_flash_radio.name, "flash-radio.sh") + else: + print "flash-radio.sh is empty, skipping..." 
+ tmp_flash_radio.close() def main(argv): bootable_only = [False] @@ -72,6 +97,7 @@ def option_handler(o, _): OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED) CopyInfo(output_zip) + AddRadio(output_zip) try: done = False From a7449a16003f05d49ba89c8ce417cd007a365e83 Mon Sep 17 00:00:00 2001 From: Marco Brohet Date: Sat, 28 Dec 2013 18:11:52 +0100 Subject: [PATCH 166/309] build: Add support for Asturian Change-Id: If666c8390b3c095c16e66df777c67a46511f4916 --- target/product/languages_full.mk | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/target/product/languages_full.mk b/target/product/languages_full.mk index 9d80b0ecf..0825e6c90 100644 --- a/target/product/languages_full.mk +++ b/target/product/languages_full.mk @@ -21,4 +21,9 @@ # These are all the locales that have translations and are displayable # by TextView in this branch. + PRODUCT_LOCALES := en_US en_AU en_IN fr_FR it_IT es_ES et_EE de_DE nl_NL cs_CZ pl_PL ja_JP zh_TW zh_CN zh_HK ru_RU ko_KR nb_NO es_US da_DK el_GR tr_TR pt_PT pt_BR rm_CH sv_SE bg_BG ca_ES en_GB fi_FI hi_IN hr_HR hu_HU in_ID iw_IL lt_LT lv_LV ro_RO sk_SK sl_SI sr_RS uk_UA vi_VN tl_PH ar_EG fa_IR th_TH sw_TZ ms_MY af_ZA zu_ZA am_ET hi_IN en_XA ar_XB fr_CA km_KH lo_LA ne_NP si_LK mn_MN hy_AM az_AZ ka_GE my_MM mr_IN ml_IN is_IS mk_MK ky_KG eu_ES gl_ES bn_BD ta_IN kn_IN te_IN uz_UZ ur_PK kk_KZ sq_AL gu_IN pa_IN + +# CyanogenMod +PRODUCT_LOCALES += ast_ES + From c96d09d63fc23fa3be1395b952ec798d9cbdece8 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Sun, 30 Mar 2014 01:13:25 +0200 Subject: [PATCH 167/309] build: Add support for Luxembourgish Change-Id: I0fb6d8f8a33f209d5593f133c2e891fe9be102c2 --- target/product/languages_full.mk | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/target/product/languages_full.mk b/target/product/languages_full.mk index 0825e6c90..aa3c99622 100644 --- a/target/product/languages_full.mk +++ 
b/target/product/languages_full.mk @@ -25,5 +25,4 @@ PRODUCT_LOCALES := en_US en_AU en_IN fr_FR it_IT es_ES et_EE de_DE nl_NL cs_CZ pl_PL ja_JP zh_TW zh_CN zh_HK ru_RU ko_KR nb_NO es_US da_DK el_GR tr_TR pt_PT pt_BR rm_CH sv_SE bg_BG ca_ES en_GB fi_FI hi_IN hr_HR hu_HU in_ID iw_IL lt_LT lv_LV ro_RO sk_SK sl_SI sr_RS uk_UA vi_VN tl_PH ar_EG fa_IR th_TH sw_TZ ms_MY af_ZA zu_ZA am_ET hi_IN en_XA ar_XB fr_CA km_KH lo_LA ne_NP si_LK mn_MN hy_AM az_AZ ka_GE my_MM mr_IN ml_IN is_IS mk_MK ky_KG eu_ES gl_ES bn_BD ta_IN kn_IN te_IN uz_UZ ur_PK kk_KZ sq_AL gu_IN pa_IN # CyanogenMod -PRODUCT_LOCALES += ast_ES - +PRODUCT_LOCALES += ast_ES lb_LU From 008d7011829da6fa752ae4493e20a99b13946106 Mon Sep 17 00:00:00 2001 From: Konsta Date: Thu, 22 Jan 2015 20:05:00 +0200 Subject: [PATCH 168/309] build: Remove OpenWnn IME from target config Change-Id: Ic34caa26bf53fc882e685794844ecfa27519e580 --- target/product/full_base.mk | 4 ---- 1 file changed, 4 deletions(-) diff --git a/target/product/full_base.mk b/target/product/full_base.mk index 7c7c86955..cd6e00b19 100644 --- a/target/product/full_base.mk +++ b/target/product/full_base.mk @@ -21,10 +21,6 @@ PRODUCT_PACKAGES := \ libfwdlockengine \ - OpenWnn \ - libWnnEngDic \ - libWnnJpnDic \ - libwnndict \ WAPPushManager # Additional settings used in all AOSP builds From 72e274f8640627b1e59f6e79c8760947d4832b0c Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Mon, 2 Feb 2015 00:11:06 +0200 Subject: [PATCH 169/309] build: Add support for Kurdish Change-Id: I1bc7638ff92b61be6c3212dbd5a3f1c354093458 --- target/product/languages_full.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/product/languages_full.mk b/target/product/languages_full.mk index aa3c99622..d4410b345 100644 --- a/target/product/languages_full.mk +++ b/target/product/languages_full.mk @@ -25,4 +25,4 @@ PRODUCT_LOCALES := en_US en_AU en_IN fr_FR it_IT es_ES et_EE de_DE nl_NL cs_CZ pl_PL ja_JP zh_TW zh_CN zh_HK ru_RU ko_KR nb_NO es_US da_DK el_GR 
tr_TR pt_PT pt_BR rm_CH sv_SE bg_BG ca_ES en_GB fi_FI hi_IN hr_HR hu_HU in_ID iw_IL lt_LT lv_LV ro_RO sk_SK sl_SI sr_RS uk_UA vi_VN tl_PH ar_EG fa_IR th_TH sw_TZ ms_MY af_ZA zu_ZA am_ET hi_IN en_XA ar_XB fr_CA km_KH lo_LA ne_NP si_LK mn_MN hy_AM az_AZ ka_GE my_MM mr_IN ml_IN is_IS mk_MK ky_KG eu_ES gl_ES bn_BD ta_IN kn_IN te_IN uz_UZ ur_PK kk_KZ sq_AL gu_IN pa_IN # CyanogenMod -PRODUCT_LOCALES += ast_ES lb_LU +PRODUCT_LOCALES += ast_ES lb_LU ku_IQ From 6f43ff6e4151716a3e331fc5e70fda6e58fc1b28 Mon Sep 17 00:00:00 2001 From: Dan Pasanen Date: Sat, 7 Feb 2015 18:59:36 -0600 Subject: [PATCH 170/309] releasetools: fix cleaning up /tmp/ * previous commit did exactly nothing... Change-Id: I66876aff83528596d628d24e60b5d4c0d81577bd --- tools/releasetools/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 81ad855ab..14a5f409b 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -487,7 +487,7 @@ def UnzipTemp(filename, pattern=None): OPTIONS.tempfiles.append(tmp) def unzip_to_dir(filename, dirname): - cmd = ["rm", "-rf", dirname + filename, "targetfiles-*"] + subprocess.call(["rm", "-rf", dirname + filename, "targetfiles-*"]) cmd = ["unzip", "-o", "-q", filename, "-d", dirname] if pattern is not None: cmd.append(pattern) From 354a0c48a230d6a852664fed0fc2a3ab9c031a0f Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Tue, 10 Feb 2015 23:58:10 +0000 Subject: [PATCH 171/309] Revert "Add handheld_core_hardware.xml to telephony base" Don't do this. handheld_core_hardware.xml includes feature declarations that aren't mandatory, particularly cameras and magnetometers, but also things like managed users. According to the CDD, these are optional. Go back to the pre-L mechanism of including the declarations per-device to make them accurate. This reverts commit dccce7bbe9673d4b8ce1a3559182767f926e1ff8. 
Conflicts: target/product/full_base_telephony.mk Change-Id: I6db55082295171b78fe595e783799ee54458c4fd --- target/product/full_base_telephony.mk | 3 --- 1 file changed, 3 deletions(-) diff --git a/target/product/full_base_telephony.mk b/target/product/full_base_telephony.mk index 4d6fce7a6..7988e9b77 100644 --- a/target/product/full_base_telephony.mk +++ b/target/product/full_base_telephony.mk @@ -22,8 +22,5 @@ PRODUCT_PROPERTY_OVERRIDES := \ keyguard.no_require_sim=true -PRODUCT_COPY_FILES := \ - frameworks/native/data/etc/handheld_core_hardware.xml:system/etc/permissions/handheld_core_hardware.xml - $(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_base.mk) $(call inherit-product, $(SRC_TARGET_DIR)/product/telephony.mk) From f03015d5311f7ee7e10f3a5ee73760f78c3fdcfb Mon Sep 17 00:00:00 2001 From: Koushik Dutta Date: Tue, 19 Jun 2012 18:39:41 -0700 Subject: [PATCH 172/309] add unpackbootimg to otatools Change-Id: I4e294c05eacc8bef8273247bcf1bf382291a3d31 --- core/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/core/Makefile b/core/Makefile index 5b1cfe32c..ad155b20c 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1433,6 +1433,7 @@ DISTTOOLS := $(HOST_OUT_EXECUTABLES)/minigzip \ $(HOST_OUT_EXECUTABLES)/adb \ $(HOST_OUT_EXECUTABLES)/mkbootfs \ $(HOST_OUT_EXECUTABLES)/mkbootimg \ + $(HOST_OUT_EXECUTABLES)/unpackbootimg \ $(HOST_OUT_EXECUTABLES)/fs_config \ $(HOST_OUT_EXECUTABLES)/zipalign \ $(HOST_OUT_EXECUTABLES)/bsdiff \ From 0e2467faf208083be5d1c8d2e7121b80b362abf2 Mon Sep 17 00:00:00 2001 From: Marcos Marado Date: Tue, 13 Jan 2015 15:14:28 +0000 Subject: [PATCH 173/309] dopush only tries to push if its arg doesn't fail `dopush mm`, as an example, tried to do mm, ignored its result, and went to push. Now, it does mm, and if mm exits successfully it continues to push the results, but if mm fails dopush stops, returning mm's return code. 
This is useful for having things like: $ mmp && adb reboot which now reboot the device weather the changes were pushed or not. With this patch, the device will only get rebooted if the compilation succeeds. Change-Id: I001e3dd83e25a775919adbccbd49914da1e94cde --- envsetup.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index c73a42dcc..d7c9e0f8a 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -2195,7 +2195,11 @@ function dopush() adb remount &> /dev/null mkdir -p $OUT - $func $* | tee $OUT/.log + ($func $*|tee $OUT/.log;return ${PIPESTATUS[0]}) + ret=$?; + if [ $ret -ne 0 ]; then + rm -f $OUT/.log;return $ret + fi # Install: LOC="$(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep '^Install: ' | cut -d ':' -f 2)" From e42b7f3c617e30c511f26bc8565a28554d68f535 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Thu, 19 Feb 2015 14:29:21 +0200 Subject: [PATCH 174/309] Don't enable ADB by default on userdebug builds Change-Id: I33ae5c6f2787017a62e679aa0c28d4b909d45935 --- tools/post_process_props.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/post_process_props.py b/tools/post_process_props.py index cbbf1f1b5..ec1aacef9 100755 --- a/tools/post_process_props.py +++ b/tools/post_process_props.py @@ -40,9 +40,8 @@ def mangle_build_prop(prop, overrides): # Put the modifications that you need to make into the /default.prop into this # function. The prop object has get(name) and put(name,value) methods. 
def mangle_default_prop(prop): - # If ro.debuggable is 1, then enable adb on USB by default - # (this is for userdebug builds) - if prop.get("ro.debuggable") == "1": + # If ro.build.type is eng, then enable adb on USB by default + if prop.get("ro.build.type") == "eng": val = prop.get("persist.sys.usb.config") if val == "": val = "adb" From c9cef696b2e6d34c6cca02ed0abbf2f76ad124e1 Mon Sep 17 00:00:00 2001 From: Scott Mertz Date: Thu, 26 Feb 2015 10:51:44 -0800 Subject: [PATCH 175/309] Enable ADB by default when ro.adb.secure is not 1 * Property ro.build.type is not part of the default.prop we can't use this to decide how to apply adb by default within this function Change-Id: Ib3eb24c655353966d64c7148d7530244b628ce94 --- tools/post_process_props.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/post_process_props.py b/tools/post_process_props.py index ec1aacef9..5193d4b57 100755 --- a/tools/post_process_props.py +++ b/tools/post_process_props.py @@ -40,8 +40,9 @@ def mangle_build_prop(prop, overrides): # Put the modifications that you need to make into the /default.prop into this # function. The prop object has get(name) and put(name,value) methods. def mangle_default_prop(prop): - # If ro.build.type is eng, then enable adb on USB by default - if prop.get("ro.build.type") == "eng": + # If ro.adb.secure is not 1, then enable adb on USB by default + # (this is for eng builds) + if prop.get("ro.adb.secure") != "1": val = prop.get("persist.sys.usb.config") if val == "": val = "adb" From 5d8053f3541f15d6b73ea949102446957b4805b7 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Mon, 9 Mar 2015 15:07:08 +0000 Subject: [PATCH 176/309] build: Add support for additional kernel config snippet * Add support for TARGET_KERNEL_ADDITIONAL_CONFIG which will append a config snippet from arch/$ARCH/configs/ to the main defconfig. * This can be used for various things, such as including DIAG support into debuggable builds. 
Change-Id: Ifa48688a3f951dd8ecc9a13a27de3a476e7bf633 --- core/tasks/kernel.mk | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 2c4927120..764532cfa 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -72,6 +72,9 @@ ifeq ($(KERNEL_ARCH),arm64) endif endif +ifneq ($(TARGET_KERNEL_ADDITIONAL_CONFIG),) +KERNEL_ADDITIONAL_CONFIG := $(TARGET_KERNEL_ADDITIONAL_CONFIG) +endif ## Do be discontinued in a future version. Notify builder about target ## kernel format requirement @@ -204,6 +207,10 @@ $(KERNEL_CONFIG): $(KERNEL_OUT) echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi + $(hide) if [ ! -z "$(KERNEL_ADDITIONAL_CONFIG)" ]; then \ + echo "Using additional config '$(KERNEL_ADDITIONAL_CONFIG)'"; \ + cat $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG) >> $(KERNEL_OUT)/.config; \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi TARGET_KERNEL_BINARIES: $(KERNEL_OUT) $(KERNEL_CONFIG) $(KERNEL_HEADERS_INSTALL) $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(TARGET_PREBUILT_INT_KERNEL_TYPE) @@ -232,6 +239,10 @@ $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi + $(hide) if [ ! 
-z "$(KERNEL_ADDITIONAL_CONFIG)" ]; then \ + echo "Using additional config '$(KERNEL_ADDITIONAL_CONFIG)'"; \ + cat $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG) >> $(KERNEL_OUT)/.config; \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) tags From 69ee65a4a7a726dc15da074627861ca6fd23c302 Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Tue, 10 Mar 2015 18:58:23 -0700 Subject: [PATCH 177/309] build: Update install tools packaging for target-files support Modifies "build: ota: Support for install tools in /tmp/install" to support signing steps being split from build steps. Package install files into target-files INSTALL path Read from target-files for OTA package creation Change-Id: I64f919c2a757b5474f6cc5f82bd6c33c2a8b558a --- core/Makefile | 3 +++ tools/releasetools/ota_from_target_files | 11 +++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/core/Makefile b/core/Makefile index ad155b20c..2187c1e5b 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1578,6 +1578,9 @@ $(BUILT_TARGET_FILES_PACKAGE): \ $(hide) mkdir -p $(zip_root)/RECOVERY $(hide) $(call package_files-copy-root, \ $(TARGET_RECOVERY_ROOT_OUT),$(zip_root)/RECOVERY/RAMDISK) + @# OTA install helpers + $(hide) $(call package_files-copy-root, $(OUT)/install, $(zip_root)/INSTALL) + # Just copy the already built boot/recovery images into the target-files dir # in order to avoid mismatched images between the out dir and what the ota # build system tries to rebuild. 
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index af4a87c09..17e5e06e5 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -507,13 +507,12 @@ def GetImage(which, tmpdir, info_dict): def CopyInstallTools(output_zip): - oldcwd = os.getcwd() - os.chdir(os.getenv('OUT')) - for root, subdirs, files in os.walk("install"): + install_path = os.path.join(OPTIONS.input_tmp, "INSTALL") + for root, subdirs, files in os.walk(install_path): for f in files: - p = os.path.join(root, f) - output_zip.write(p, p) - os.chdir(oldcwd) + install_source = os.path.join(root, f) + install_target = os.path.join("install", os.path.relpath(root, install_path), f) + output_zip.write(install_source, install_target) def WriteFullOTAPackage(input_zip, output_zip): From 3dde85b9b897899c310e9705943dfcb5aa9133b7 Mon Sep 17 00:00:00 2001 From: Josue Rivera Date: Tue, 29 Dec 2015 01:46:22 +0100 Subject: [PATCH 178/309] Revert "Add temporary hack to help with merge resolution." This reverts commit 3c4340c929d54ac8c4772650a8d3efa29336fa5f. 
Change-Id: I94da9fa46a7fdd960299f110d335633ff4760987 Signed-off-by: Josue Rivera --- .../check_target_files_signatures | 443 +-------------- .../check_target_files_signatures.py | 442 +++++++++++++++ .../check_target_files_signatures.tmp | 1 - tools/releasetools/make_recovery_patch | 54 +- tools/releasetools/make_recovery_patch.py | 53 ++ tools/releasetools/make_recovery_patch.tmp | 1 - tools/releasetools/sign_target_files_apks | 521 +----------------- tools/releasetools/sign_target_files_apks.py | 512 +++++++++++++++++ tools/releasetools/sign_target_files_apks.tmp | 1 - 9 files changed, 1010 insertions(+), 1018 deletions(-) mode change 100755 => 120000 tools/releasetools/check_target_files_signatures create mode 100755 tools/releasetools/check_target_files_signatures.py delete mode 120000 tools/releasetools/check_target_files_signatures.tmp mode change 100755 => 120000 tools/releasetools/make_recovery_patch create mode 100755 tools/releasetools/make_recovery_patch.py delete mode 120000 tools/releasetools/make_recovery_patch.tmp mode change 100755 => 120000 tools/releasetools/sign_target_files_apks create mode 100755 tools/releasetools/sign_target_files_apks.py delete mode 120000 tools/releasetools/sign_target_files_apks.tmp diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures deleted file mode 100755 index 5c541abc6..000000000 --- a/tools/releasetools/check_target_files_signatures +++ /dev/null @@ -1,442 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2009 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Check the signatures of all APKs in a target_files .zip file. With --c, compare the signatures of each package to the ones in a separate -target_files (usually a previously distributed build for the same -device) and flag any changes. - -Usage: check_target_file_signatures [flags] target_files - - -c (--compare_with) - Look for compatibility problems between the two sets of target - files (eg., packages whose keys have changed). - - -l (--local_cert_dirs) - Comma-separated list of top-level directories to scan for - .x509.pem files. Defaults to "vendor,build". Where cert files - can be found that match APK signatures, the filename will be - printed as the cert name, otherwise a hash of the cert plus its - subject string will be printed instead. - - -t (--text) - Dump the certificate information for both packages in comparison - mode (this output is normally suppressed). - -""" - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." - sys.exit(1) - -import os -import re -import shutil -import subprocess -import zipfile - -import common - -# Work around a bug in python's zipfile module that prevents opening -# of zipfiles if any entry has an extra field of between 1 and 3 bytes -# (which is common with zipaligned APKs). This overrides the -# ZipInfo._decodeExtra() method (which contains the bug) with an empty -# version (since we don't need to decode the extra field anyway). 
-class MyZipInfo(zipfile.ZipInfo): - def _decodeExtra(self): - pass -zipfile.ZipInfo = MyZipInfo - -OPTIONS = common.OPTIONS - -OPTIONS.text = False -OPTIONS.compare_with = None -OPTIONS.local_cert_dirs = ("vendor", "build") - -PROBLEMS = [] -PROBLEM_PREFIX = [] - -def AddProblem(msg): - PROBLEMS.append(" ".join(PROBLEM_PREFIX) + " " + msg) -def Push(msg): - PROBLEM_PREFIX.append(msg) -def Pop(): - PROBLEM_PREFIX.pop() - - -def Banner(msg): - print "-" * 70 - print " ", msg - print "-" * 70 - - -def GetCertSubject(cert): - p = common.Run(["openssl", "x509", "-inform", "DER", "-text"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - out, err = p.communicate(cert) - if err and not err.strip(): - return "(error reading cert subject)" - for line in out.split("\n"): - line = line.strip() - if line.startswith("Subject:"): - return line[8:].strip() - return "(unknown cert subject)" - - -class CertDB(object): - def __init__(self): - self.certs = {} - - def Add(self, cert, name=None): - if cert in self.certs: - if name: - self.certs[cert] = self.certs[cert] + "," + name - else: - if name is None: - name = "unknown cert %s (%s)" % (common.sha1(cert).hexdigest()[:12], - GetCertSubject(cert)) - self.certs[cert] = name - - def Get(self, cert): - """Return the name for a given cert.""" - return self.certs.get(cert, None) - - def FindLocalCerts(self): - to_load = [] - for top in OPTIONS.local_cert_dirs: - for dirpath, _, filenames in os.walk(top): - certs = [os.path.join(dirpath, i) - for i in filenames if i.endswith(".x509.pem")] - if certs: - to_load.extend(certs) - - for i in to_load: - f = open(i) - cert = common.ParseCertificate(f.read()) - f.close() - name, _ = os.path.splitext(i) - name, _ = os.path.splitext(name) - self.Add(cert, name) - -ALL_CERTS = CertDB() - - -def CertFromPKCS7(data, filename): - """Read the cert out of a PKCS#7-format file (which is what is - stored in a signed .apk).""" - Push(filename + ":") - try: - p = common.Run(["openssl", "pkcs7", - 
"-inform", "DER", - "-outform", "PEM", - "-print_certs"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - out, err = p.communicate(data) - if err and not err.strip(): - AddProblem("error reading cert:\n" + err) - return None - - cert = common.ParseCertificate(out) - if not cert: - AddProblem("error parsing cert output") - return None - return cert - finally: - Pop() - - -class APK(object): - def __init__(self, full_filename, filename): - self.filename = filename - self.certs = None - self.shared_uid = None - self.package = None - - Push(filename+":") - try: - self.RecordCerts(full_filename) - self.ReadManifest(full_filename) - finally: - Pop() - - def RecordCerts(self, full_filename): - out = set() - try: - f = open(full_filename) - apk = zipfile.ZipFile(f, "r") - pkcs7 = None - for info in apk.infolist(): - if info.filename.startswith("META-INF/") and \ - (info.filename.endswith(".DSA") or info.filename.endswith(".RSA")): - pkcs7 = apk.read(info.filename) - cert = CertFromPKCS7(pkcs7, info.filename) - out.add(cert) - ALL_CERTS.Add(cert) - if not pkcs7: - AddProblem("no signature") - finally: - f.close() - self.certs = frozenset(out) - - def ReadManifest(self, full_filename): - p = common.Run(["aapt", "dump", "xmltree", full_filename, - "AndroidManifest.xml"], - stdout=subprocess.PIPE) - manifest, err = p.communicate() - if err: - AddProblem("failed to read manifest") - return - - self.shared_uid = None - self.package = None - - for line in manifest.split("\n"): - line = line.strip() - m = re.search(r'A: (\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line) - if m: - name = m.group(1) - if name == "android:sharedUserId": - if self.shared_uid is not None: - AddProblem("multiple sharedUserId declarations") - self.shared_uid = m.group(2) - elif name == "package": - if self.package is not None: - AddProblem("multiple package declarations") - self.package = m.group(2) - - if self.package is None: - AddProblem("no package declaration") - - -class TargetFiles(object): - 
def __init__(self): - self.max_pkg_len = 30 - self.max_fn_len = 20 - self.apks = None - self.apks_by_basename = None - self.certmap = None - - def LoadZipFile(self, filename): - d, z = common.UnzipTemp(filename, '*.apk') - try: - self.apks = {} - self.apks_by_basename = {} - for dirpath, _, filenames in os.walk(d): - for fn in filenames: - if fn.endswith(".apk"): - fullname = os.path.join(dirpath, fn) - displayname = fullname[len(d)+1:] - apk = APK(fullname, displayname) - self.apks[apk.package] = apk - self.apks_by_basename[os.path.basename(apk.filename)] = apk - - self.max_pkg_len = max(self.max_pkg_len, len(apk.package)) - self.max_fn_len = max(self.max_fn_len, len(apk.filename)) - finally: - shutil.rmtree(d) - - self.certmap = common.ReadApkCerts(z) - z.close() - - def CheckSharedUids(self): - """Look for any instances where packages signed with different - certs request the same sharedUserId.""" - apks_by_uid = {} - for apk in self.apks.itervalues(): - if apk.shared_uid: - apks_by_uid.setdefault(apk.shared_uid, []).append(apk) - - for uid in sorted(apks_by_uid.keys()): - apks = apks_by_uid[uid] - for apk in apks[1:]: - if apk.certs != apks[0].certs: - break - else: - # all packages have the same set of certs; this uid is fine. - continue - - AddProblem("different cert sets for packages with uid %s" % (uid,)) - - print "uid %s is shared by packages with different cert sets:" % (uid,) - for apk in apks: - print "%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename) - for cert in apk.certs: - print " ", ALL_CERTS.Get(cert) - print - - def CheckExternalSignatures(self): - for apk_filename, certname in self.certmap.iteritems(): - if certname == "EXTERNAL": - # Apps marked EXTERNAL should be signed with the test key - # during development, then manually re-signed after - # predexopting. Consider it an error if this app is now - # signed with any key that is present in our tree. 
- apk = self.apks_by_basename[apk_filename] - name = ALL_CERTS.Get(apk.cert) - if not name.startswith("unknown "): - Push(apk.filename) - AddProblem("hasn't been signed with EXTERNAL cert") - Pop() - - def PrintCerts(self): - """Display a table of packages grouped by cert.""" - by_cert = {} - for apk in self.apks.itervalues(): - for cert in apk.certs: - by_cert.setdefault(cert, []).append((apk.package, apk)) - - order = [(-len(v), k) for (k, v) in by_cert.iteritems()] - order.sort() - - for _, cert in order: - print "%s:" % (ALL_CERTS.Get(cert),) - apks = by_cert[cert] - apks.sort() - for _, apk in apks: - if apk.shared_uid: - print " %-*s %-*s [%s]" % (self.max_fn_len, apk.filename, - self.max_pkg_len, apk.package, - apk.shared_uid) - else: - print " %-*s %-*s" % (self.max_fn_len, apk.filename, - self.max_pkg_len, apk.package) - print - - def CompareWith(self, other): - """Look for instances where a given package that exists in both - self and other have different certs.""" - - all_apks = set(self.apks.keys()) - all_apks.update(other.apks.keys()) - - max_pkg_len = max(self.max_pkg_len, other.max_pkg_len) - - by_certpair = {} - - for i in all_apks: - if i in self.apks: - if i in other.apks: - # in both; should have same set of certs - if self.apks[i].certs != other.apks[i].certs: - by_certpair.setdefault((other.apks[i].certs, - self.apks[i].certs), []).append(i) - else: - print "%s [%s]: new APK (not in comparison target_files)" % ( - i, self.apks[i].filename) - else: - if i in other.apks: - print "%s [%s]: removed APK (only in comparison target_files)" % ( - i, other.apks[i].filename) - - if by_certpair: - AddProblem("some APKs changed certs") - Banner("APK signing differences") - for (old, new), packages in sorted(by_certpair.items()): - for i, o in enumerate(old): - if i == 0: - print "was", ALL_CERTS.Get(o) - else: - print " ", ALL_CERTS.Get(o) - for i, n in enumerate(new): - if i == 0: - print "now", ALL_CERTS.Get(n) - else: - print " ", ALL_CERTS.Get(n) - for 
i in sorted(packages): - old_fn = other.apks[i].filename - new_fn = self.apks[i].filename - if old_fn == new_fn: - print " %-*s [%s]" % (max_pkg_len, i, old_fn) - else: - print " %-*s [was: %s; now: %s]" % (max_pkg_len, i, - old_fn, new_fn) - print - - -def main(argv): - def option_handler(o, a): - if o in ("-c", "--compare_with"): - OPTIONS.compare_with = a - elif o in ("-l", "--local_cert_dirs"): - OPTIONS.local_cert_dirs = [i.strip() for i in a.split(",")] - elif o in ("-t", "--text"): - OPTIONS.text = True - else: - return False - return True - - args = common.ParseOptions(argv, __doc__, - extra_opts="c:l:t", - extra_long_opts=["compare_with=", - "local_cert_dirs="], - extra_option_handler=option_handler) - - if len(args) != 1: - common.Usage(__doc__) - sys.exit(1) - - ALL_CERTS.FindLocalCerts() - - Push("input target_files:") - try: - target_files = TargetFiles() - target_files.LoadZipFile(args[0]) - finally: - Pop() - - compare_files = None - if OPTIONS.compare_with: - Push("comparison target_files:") - try: - compare_files = TargetFiles() - compare_files.LoadZipFile(OPTIONS.compare_with) - finally: - Pop() - - if OPTIONS.text or not compare_files: - Banner("target files") - target_files.PrintCerts() - target_files.CheckSharedUids() - target_files.CheckExternalSignatures() - if compare_files: - if OPTIONS.text: - Banner("comparison files") - compare_files.PrintCerts() - target_files.CompareWith(compare_files) - - if PROBLEMS: - print "%d problem(s) found:\n" % (len(PROBLEMS),) - for p in PROBLEMS: - print p - return 1 - - return 0 - - -if __name__ == '__main__': - try: - r = main(sys.argv[1:]) - sys.exit(r) - except common.ExternalError as e: - print - print " ERROR: %s" % (e,) - print - sys.exit(1) diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures new file mode 120000 index 000000000..9f62aa323 --- /dev/null +++ b/tools/releasetools/check_target_files_signatures @@ -0,0 +1 @@ 
+check_target_files_signatures.py \ No newline at end of file diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py new file mode 100755 index 000000000..5c541abc6 --- /dev/null +++ b/tools/releasetools/check_target_files_signatures.py @@ -0,0 +1,442 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Check the signatures of all APKs in a target_files .zip file. With +-c, compare the signatures of each package to the ones in a separate +target_files (usually a previously distributed build for the same +device) and flag any changes. + +Usage: check_target_file_signatures [flags] target_files + + -c (--compare_with) + Look for compatibility problems between the two sets of target + files (eg., packages whose keys have changed). + + -l (--local_cert_dirs) + Comma-separated list of top-level directories to scan for + .x509.pem files. Defaults to "vendor,build". Where cert files + can be found that match APK signatures, the filename will be + printed as the cert name, otherwise a hash of the cert plus its + subject string will be printed instead. + + -t (--text) + Dump the certificate information for both packages in comparison + mode (this output is normally suppressed). + +""" + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." 
+ sys.exit(1) + +import os +import re +import shutil +import subprocess +import zipfile + +import common + +# Work around a bug in python's zipfile module that prevents opening +# of zipfiles if any entry has an extra field of between 1 and 3 bytes +# (which is common with zipaligned APKs). This overrides the +# ZipInfo._decodeExtra() method (which contains the bug) with an empty +# version (since we don't need to decode the extra field anyway). +class MyZipInfo(zipfile.ZipInfo): + def _decodeExtra(self): + pass +zipfile.ZipInfo = MyZipInfo + +OPTIONS = common.OPTIONS + +OPTIONS.text = False +OPTIONS.compare_with = None +OPTIONS.local_cert_dirs = ("vendor", "build") + +PROBLEMS = [] +PROBLEM_PREFIX = [] + +def AddProblem(msg): + PROBLEMS.append(" ".join(PROBLEM_PREFIX) + " " + msg) +def Push(msg): + PROBLEM_PREFIX.append(msg) +def Pop(): + PROBLEM_PREFIX.pop() + + +def Banner(msg): + print "-" * 70 + print " ", msg + print "-" * 70 + + +def GetCertSubject(cert): + p = common.Run(["openssl", "x509", "-inform", "DER", "-text"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = p.communicate(cert) + if err and not err.strip(): + return "(error reading cert subject)" + for line in out.split("\n"): + line = line.strip() + if line.startswith("Subject:"): + return line[8:].strip() + return "(unknown cert subject)" + + +class CertDB(object): + def __init__(self): + self.certs = {} + + def Add(self, cert, name=None): + if cert in self.certs: + if name: + self.certs[cert] = self.certs[cert] + "," + name + else: + if name is None: + name = "unknown cert %s (%s)" % (common.sha1(cert).hexdigest()[:12], + GetCertSubject(cert)) + self.certs[cert] = name + + def Get(self, cert): + """Return the name for a given cert.""" + return self.certs.get(cert, None) + + def FindLocalCerts(self): + to_load = [] + for top in OPTIONS.local_cert_dirs: + for dirpath, _, filenames in os.walk(top): + certs = [os.path.join(dirpath, i) + for i in filenames if i.endswith(".x509.pem")] + 
if certs: + to_load.extend(certs) + + for i in to_load: + f = open(i) + cert = common.ParseCertificate(f.read()) + f.close() + name, _ = os.path.splitext(i) + name, _ = os.path.splitext(name) + self.Add(cert, name) + +ALL_CERTS = CertDB() + + +def CertFromPKCS7(data, filename): + """Read the cert out of a PKCS#7-format file (which is what is + stored in a signed .apk).""" + Push(filename + ":") + try: + p = common.Run(["openssl", "pkcs7", + "-inform", "DER", + "-outform", "PEM", + "-print_certs"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = p.communicate(data) + if err and not err.strip(): + AddProblem("error reading cert:\n" + err) + return None + + cert = common.ParseCertificate(out) + if not cert: + AddProblem("error parsing cert output") + return None + return cert + finally: + Pop() + + +class APK(object): + def __init__(self, full_filename, filename): + self.filename = filename + self.certs = None + self.shared_uid = None + self.package = None + + Push(filename+":") + try: + self.RecordCerts(full_filename) + self.ReadManifest(full_filename) + finally: + Pop() + + def RecordCerts(self, full_filename): + out = set() + try: + f = open(full_filename) + apk = zipfile.ZipFile(f, "r") + pkcs7 = None + for info in apk.infolist(): + if info.filename.startswith("META-INF/") and \ + (info.filename.endswith(".DSA") or info.filename.endswith(".RSA")): + pkcs7 = apk.read(info.filename) + cert = CertFromPKCS7(pkcs7, info.filename) + out.add(cert) + ALL_CERTS.Add(cert) + if not pkcs7: + AddProblem("no signature") + finally: + f.close() + self.certs = frozenset(out) + + def ReadManifest(self, full_filename): + p = common.Run(["aapt", "dump", "xmltree", full_filename, + "AndroidManifest.xml"], + stdout=subprocess.PIPE) + manifest, err = p.communicate() + if err: + AddProblem("failed to read manifest") + return + + self.shared_uid = None + self.package = None + + for line in manifest.split("\n"): + line = line.strip() + m = re.search(r'A: 
(\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line) + if m: + name = m.group(1) + if name == "android:sharedUserId": + if self.shared_uid is not None: + AddProblem("multiple sharedUserId declarations") + self.shared_uid = m.group(2) + elif name == "package": + if self.package is not None: + AddProblem("multiple package declarations") + self.package = m.group(2) + + if self.package is None: + AddProblem("no package declaration") + + +class TargetFiles(object): + def __init__(self): + self.max_pkg_len = 30 + self.max_fn_len = 20 + self.apks = None + self.apks_by_basename = None + self.certmap = None + + def LoadZipFile(self, filename): + d, z = common.UnzipTemp(filename, '*.apk') + try: + self.apks = {} + self.apks_by_basename = {} + for dirpath, _, filenames in os.walk(d): + for fn in filenames: + if fn.endswith(".apk"): + fullname = os.path.join(dirpath, fn) + displayname = fullname[len(d)+1:] + apk = APK(fullname, displayname) + self.apks[apk.package] = apk + self.apks_by_basename[os.path.basename(apk.filename)] = apk + + self.max_pkg_len = max(self.max_pkg_len, len(apk.package)) + self.max_fn_len = max(self.max_fn_len, len(apk.filename)) + finally: + shutil.rmtree(d) + + self.certmap = common.ReadApkCerts(z) + z.close() + + def CheckSharedUids(self): + """Look for any instances where packages signed with different + certs request the same sharedUserId.""" + apks_by_uid = {} + for apk in self.apks.itervalues(): + if apk.shared_uid: + apks_by_uid.setdefault(apk.shared_uid, []).append(apk) + + for uid in sorted(apks_by_uid.keys()): + apks = apks_by_uid[uid] + for apk in apks[1:]: + if apk.certs != apks[0].certs: + break + else: + # all packages have the same set of certs; this uid is fine. 
+ continue + + AddProblem("different cert sets for packages with uid %s" % (uid,)) + + print "uid %s is shared by packages with different cert sets:" % (uid,) + for apk in apks: + print "%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename) + for cert in apk.certs: + print " ", ALL_CERTS.Get(cert) + print + + def CheckExternalSignatures(self): + for apk_filename, certname in self.certmap.iteritems(): + if certname == "EXTERNAL": + # Apps marked EXTERNAL should be signed with the test key + # during development, then manually re-signed after + # predexopting. Consider it an error if this app is now + # signed with any key that is present in our tree. + apk = self.apks_by_basename[apk_filename] + name = ALL_CERTS.Get(apk.cert) + if not name.startswith("unknown "): + Push(apk.filename) + AddProblem("hasn't been signed with EXTERNAL cert") + Pop() + + def PrintCerts(self): + """Display a table of packages grouped by cert.""" + by_cert = {} + for apk in self.apks.itervalues(): + for cert in apk.certs: + by_cert.setdefault(cert, []).append((apk.package, apk)) + + order = [(-len(v), k) for (k, v) in by_cert.iteritems()] + order.sort() + + for _, cert in order: + print "%s:" % (ALL_CERTS.Get(cert),) + apks = by_cert[cert] + apks.sort() + for _, apk in apks: + if apk.shared_uid: + print " %-*s %-*s [%s]" % (self.max_fn_len, apk.filename, + self.max_pkg_len, apk.package, + apk.shared_uid) + else: + print " %-*s %-*s" % (self.max_fn_len, apk.filename, + self.max_pkg_len, apk.package) + print + + def CompareWith(self, other): + """Look for instances where a given package that exists in both + self and other have different certs.""" + + all_apks = set(self.apks.keys()) + all_apks.update(other.apks.keys()) + + max_pkg_len = max(self.max_pkg_len, other.max_pkg_len) + + by_certpair = {} + + for i in all_apks: + if i in self.apks: + if i in other.apks: + # in both; should have same set of certs + if self.apks[i].certs != other.apks[i].certs: + 
by_certpair.setdefault((other.apks[i].certs, + self.apks[i].certs), []).append(i) + else: + print "%s [%s]: new APK (not in comparison target_files)" % ( + i, self.apks[i].filename) + else: + if i in other.apks: + print "%s [%s]: removed APK (only in comparison target_files)" % ( + i, other.apks[i].filename) + + if by_certpair: + AddProblem("some APKs changed certs") + Banner("APK signing differences") + for (old, new), packages in sorted(by_certpair.items()): + for i, o in enumerate(old): + if i == 0: + print "was", ALL_CERTS.Get(o) + else: + print " ", ALL_CERTS.Get(o) + for i, n in enumerate(new): + if i == 0: + print "now", ALL_CERTS.Get(n) + else: + print " ", ALL_CERTS.Get(n) + for i in sorted(packages): + old_fn = other.apks[i].filename + new_fn = self.apks[i].filename + if old_fn == new_fn: + print " %-*s [%s]" % (max_pkg_len, i, old_fn) + else: + print " %-*s [was: %s; now: %s]" % (max_pkg_len, i, + old_fn, new_fn) + print + + +def main(argv): + def option_handler(o, a): + if o in ("-c", "--compare_with"): + OPTIONS.compare_with = a + elif o in ("-l", "--local_cert_dirs"): + OPTIONS.local_cert_dirs = [i.strip() for i in a.split(",")] + elif o in ("-t", "--text"): + OPTIONS.text = True + else: + return False + return True + + args = common.ParseOptions(argv, __doc__, + extra_opts="c:l:t", + extra_long_opts=["compare_with=", + "local_cert_dirs="], + extra_option_handler=option_handler) + + if len(args) != 1: + common.Usage(__doc__) + sys.exit(1) + + ALL_CERTS.FindLocalCerts() + + Push("input target_files:") + try: + target_files = TargetFiles() + target_files.LoadZipFile(args[0]) + finally: + Pop() + + compare_files = None + if OPTIONS.compare_with: + Push("comparison target_files:") + try: + compare_files = TargetFiles() + compare_files.LoadZipFile(OPTIONS.compare_with) + finally: + Pop() + + if OPTIONS.text or not compare_files: + Banner("target files") + target_files.PrintCerts() + target_files.CheckSharedUids() + target_files.CheckExternalSignatures() + 
if compare_files: + if OPTIONS.text: + Banner("comparison files") + compare_files.PrintCerts() + target_files.CompareWith(compare_files) + + if PROBLEMS: + print "%d problem(s) found:\n" % (len(PROBLEMS),) + for p in PROBLEMS: + print p + return 1 + + return 0 + + +if __name__ == '__main__': + try: + r = main(sys.argv[1:]) + sys.exit(r) + except common.ExternalError as e: + print + print " ERROR: %s" % (e,) + print + sys.exit(1) diff --git a/tools/releasetools/check_target_files_signatures.tmp b/tools/releasetools/check_target_files_signatures.tmp deleted file mode 120000 index 9f62aa323..000000000 --- a/tools/releasetools/check_target_files_signatures.tmp +++ /dev/null @@ -1 +0,0 @@ -check_target_files_signatures.py \ No newline at end of file diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch deleted file mode 100755 index 08d145008..000000000 --- a/tools/releasetools/make_recovery_patch +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2014 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." 
- sys.exit(1) - -import os -import common - -OPTIONS = common.OPTIONS - -def main(argv): - # def option_handler(o, a): - # return False - - args = common.ParseOptions(argv, __doc__) - input_dir, output_dir = args - - OPTIONS.info_dict = common.LoadInfoDict(input_dir) - - recovery_img = common.GetBootableImage("recovery.img", "recovery.img", - input_dir, "RECOVERY") - boot_img = common.GetBootableImage("boot.img", "boot.img", - input_dir, "BOOT") - - if not recovery_img or not boot_img: - sys.exit(0) - - def output_sink(fn, data): - with open(os.path.join(output_dir, "SYSTEM", *fn.split("/")), "wb") as f: - f.write(data) - - common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch new file mode 120000 index 000000000..45cec0862 --- /dev/null +++ b/tools/releasetools/make_recovery_patch @@ -0,0 +1 @@ +make_recovery_patch.py \ No newline at end of file diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py new file mode 100755 index 000000000..08d145008 --- /dev/null +++ b/tools/releasetools/make_recovery_patch.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# +# Copyright (C) 2014 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." 
+ sys.exit(1) + +import os +import common + +OPTIONS = common.OPTIONS + +def main(argv): + # def option_handler(o, a): + # return False + + args = common.ParseOptions(argv, __doc__) + input_dir, output_dir = args + + OPTIONS.info_dict = common.LoadInfoDict(input_dir) + + recovery_img = common.GetBootableImage("recovery.img", "recovery.img", + input_dir, "RECOVERY") + boot_img = common.GetBootableImage("boot.img", "boot.img", + input_dir, "BOOT") + + if not recovery_img or not boot_img: + sys.exit(0) + + def output_sink(fn, data): + with open(os.path.join(output_dir, "SYSTEM", *fn.split("/")), "wb") as f: + f.write(data) + + common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/tools/releasetools/make_recovery_patch.tmp b/tools/releasetools/make_recovery_patch.tmp deleted file mode 120000 index 45cec0862..000000000 --- a/tools/releasetools/make_recovery_patch.tmp +++ /dev/null @@ -1 +0,0 @@ -make_recovery_patch.py \ No newline at end of file diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks deleted file mode 100755 index 290fdb990..000000000 --- a/tools/releasetools/sign_target_files_apks +++ /dev/null @@ -1,520 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2008 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Signs all the APK files in a target-files zipfile, producing a new -target-files zip. 
- -Usage: sign_target_files_apks [flags] input_target_files output_target_files - - -e (--extra_apks) - Add extra APK name/key pairs as though they appeared in - apkcerts.txt (so mappings specified by -k and -d are applied). - Keys specified in -e override any value for that app contained - in the apkcerts.txt file. Option may be repeated to give - multiple extra packages. - - -k (--key_mapping) - Add a mapping from the key name as specified in apkcerts.txt (the - src_key) to the real key you wish to sign the package with - (dest_key). Option may be repeated to give multiple key - mappings. - - -d (--default_key_mappings) - Set up the following key mappings: - - $devkey/devkey ==> $dir/releasekey - $devkey/testkey ==> $dir/releasekey - $devkey/media ==> $dir/media - $devkey/shared ==> $dir/shared - $devkey/platform ==> $dir/platform - - where $devkey is the directory part of the value of - default_system_dev_certificate from the input target-files's - META/misc_info.txt. (Defaulting to "build/target/product/security" - if the value is not present in misc_info. - - -d and -k options are added to the set of mappings in the order - in which they appear on the command line. - - -o (--replace_ota_keys) - Replace the certificate (public key) used by OTA package - verification with the one specified in the input target_files - zip (in the META/otakeys.txt file). Key remapping (-k and -d) - is performed on this key. - - -t (--tag_changes) <+tag>,<-tag>,... - Comma-separated list of changes to make to the set of tags (in - the last component of the build fingerprint). Prefix each with - '+' or '-' to indicate whether that tag should be added or - removed. Changes are processed in the order they appear. - Default value is "-test-keys,-dev-keys,+release-keys". - -""" - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." 
- sys.exit(1) - -import base64 -import cStringIO -import copy -import errno -import os -import re -import shutil -import subprocess -import tempfile -import zipfile - -import add_img_to_target_files -import common - -OPTIONS = common.OPTIONS - -OPTIONS.extra_apks = {} -OPTIONS.key_map = {} -OPTIONS.replace_ota_keys = False -OPTIONS.replace_verity_public_key = False -OPTIONS.replace_verity_private_key = False -OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys") - -def GetApkCerts(tf_zip): - certmap = common.ReadApkCerts(tf_zip) - - # apply the key remapping to the contents of the file - for apk, cert in certmap.iteritems(): - certmap[apk] = OPTIONS.key_map.get(cert, cert) - - # apply all the -e options, overriding anything in the file - for apk, cert in OPTIONS.extra_apks.iteritems(): - if not cert: - cert = "PRESIGNED" - certmap[apk] = OPTIONS.key_map.get(cert, cert) - - return certmap - - -def CheckAllApksSigned(input_tf_zip, apk_key_map): - """Check that all the APKs we want to sign have keys specified, and - error out if they don't.""" - unknown_apks = [] - for info in input_tf_zip.infolist(): - if info.filename.endswith(".apk"): - name = os.path.basename(info.filename) - if name not in apk_key_map: - unknown_apks.append(name) - if unknown_apks: - print "ERROR: no key specified for:\n\n ", - print "\n ".join(unknown_apks) - print "\nUse '-e =' to specify a key (which may be an" - print "empty string to not sign this apk)." 
- sys.exit(1) - - -def SignApk(data, keyname, pw): - unsigned = tempfile.NamedTemporaryFile() - unsigned.write(data) - unsigned.flush() - - signed = tempfile.NamedTemporaryFile() - - common.SignFile(unsigned.name, signed.name, keyname, pw, align=4) - - data = signed.read() - unsigned.close() - signed.close() - - return data - - -def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info, - apk_key_map, key_passwords): - - maxsize = max([len(os.path.basename(i.filename)) - for i in input_tf_zip.infolist() - if i.filename.endswith('.apk')]) - rebuild_recovery = False - - tmpdir = tempfile.mkdtemp() - def write_to_temp(fn, attr, data): - fn = os.path.join(tmpdir, fn) - if fn.endswith("/"): - fn = os.path.join(tmpdir, fn) - os.mkdir(fn) - else: - d = os.path.dirname(fn) - if d and not os.path.exists(d): - os.makedirs(d) - - if attr >> 16 == 0xa1ff: - os.symlink(data, fn) - else: - with open(fn, "wb") as f: - f.write(data) - - for info in input_tf_zip.infolist(): - if info.filename.startswith("IMAGES/"): - continue - - data = input_tf_zip.read(info.filename) - out_info = copy.copy(info) - - if (info.filename == "META/misc_info.txt" and - OPTIONS.replace_verity_private_key): - ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info, - OPTIONS.replace_verity_private_key[1]) - elif (info.filename == "BOOT/RAMDISK/verity_key" and - OPTIONS.replace_verity_public_key): - new_data = ReplaceVerityPublicKey(output_tf_zip, - OPTIONS.replace_verity_public_key[1]) - write_to_temp(info.filename, info.external_attr, new_data) - elif (info.filename.startswith("BOOT/") or - info.filename.startswith("RECOVERY/") or - info.filename.startswith("META/") or - info.filename == "SYSTEM/etc/recovery-resource.dat"): - write_to_temp(info.filename, info.external_attr, data) - - if info.filename.endswith(".apk"): - name = os.path.basename(info.filename) - key = apk_key_map[name] - if key not in common.SPECIAL_CERT_STRINGS: - print " signing: %-*s (%s)" % (maxsize, name, key) - 
signed_data = SignApk(data, key, key_passwords[key]) - common.ZipWriteStr(output_tf_zip, out_info, signed_data) - else: - # an APK we're not supposed to sign. - print "NOT signing: %s" % (name,) - common.ZipWriteStr(output_tf_zip, out_info, data) - elif info.filename in ("SYSTEM/build.prop", - "VENDOR/build.prop", - "BOOT/RAMDISK/default.prop", - "RECOVERY/RAMDISK/default.prop"): - print "rewriting %s:" % (info.filename,) - new_data = RewriteProps(data, misc_info) - common.ZipWriteStr(output_tf_zip, out_info, new_data) - if info.filename in ("BOOT/RAMDISK/default.prop", - "RECOVERY/RAMDISK/default.prop"): - write_to_temp(info.filename, info.external_attr, new_data) - elif info.filename.endswith("mac_permissions.xml"): - print "rewriting %s with new keys." % (info.filename,) - new_data = ReplaceCerts(data) - common.ZipWriteStr(output_tf_zip, out_info, new_data) - elif info.filename in ("SYSTEM/recovery-from-boot.p", - "SYSTEM/bin/install-recovery.sh"): - rebuild_recovery = True - elif (OPTIONS.replace_ota_keys and - info.filename in ("RECOVERY/RAMDISK/res/keys", - "SYSTEM/etc/security/otacerts.zip")): - # don't copy these files if we're regenerating them below - pass - elif (OPTIONS.replace_verity_private_key and - info.filename == "META/misc_info.txt"): - pass - elif (OPTIONS.replace_verity_public_key and - info.filename == "BOOT/RAMDISK/verity_key"): - pass - else: - # a non-APK file; copy it verbatim - common.ZipWriteStr(output_tf_zip, out_info, data) - - if OPTIONS.replace_ota_keys: - new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info) - if new_recovery_keys: - write_to_temp("RECOVERY/RAMDISK/res/keys", 0o755 << 16, new_recovery_keys) - - if rebuild_recovery: - recovery_img = common.GetBootableImage( - "recovery.img", "recovery.img", tmpdir, "RECOVERY", info_dict=misc_info) - boot_img = common.GetBootableImage( - "boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info) - - def output_sink(fn, data): - common.ZipWriteStr(output_tf_zip, 
"SYSTEM/" + fn, data) - - common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img, - info_dict=misc_info) - - shutil.rmtree(tmpdir) - - -def ReplaceCerts(data): - """Given a string of data, replace all occurences of a set - of X509 certs with a newer set of X509 certs and return - the updated data string.""" - for old, new in OPTIONS.key_map.iteritems(): - try: - if OPTIONS.verbose: - print " Replacing %s.x509.pem with %s.x509.pem" % (old, new) - f = open(old + ".x509.pem") - old_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() - f.close() - f = open(new + ".x509.pem") - new_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() - f.close() - # Only match entire certs. - pattern = "\\b"+old_cert16+"\\b" - (data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE) - if OPTIONS.verbose: - print " Replaced %d occurence(s) of %s.x509.pem with " \ - "%s.x509.pem" % (num, old, new) - except IOError as e: - if e.errno == errno.ENOENT and not OPTIONS.verbose: - continue - - print " Error accessing %s. %s. Skip replacing %s.x509.pem " \ - "with %s.x509.pem." 
% (e.filename, e.strerror, old, new) - - return data - - -def EditTags(tags): - """Given a string containing comma-separated tags, apply the edits - specified in OPTIONS.tag_changes and return the updated string.""" - tags = set(tags.split(",")) - for ch in OPTIONS.tag_changes: - if ch[0] == "-": - tags.discard(ch[1:]) - elif ch[0] == "+": - tags.add(ch[1:]) - return ",".join(sorted(tags)) - - -def RewriteProps(data, misc_info): - output = [] - for line in data.split("\n"): - line = line.strip() - original_line = line - if line and line[0] != '#' and "=" in line: - key, value = line.split("=", 1) - if (key in ("ro.build.fingerprint", "ro.vendor.build.fingerprint") - and misc_info.get("oem_fingerprint_properties") is None): - pieces = value.split("/") - pieces[-1] = EditTags(pieces[-1]) - value = "/".join(pieces) - elif (key in ("ro.build.thumbprint", "ro.vendor.build.thumbprint") - and misc_info.get("oem_fingerprint_properties") is not None): - pieces = value.split("/") - pieces[-1] = EditTags(pieces[-1]) - value = "/".join(pieces) - elif key == "ro.bootimage.build.fingerprint": - pieces = value.split("/") - pieces[-1] = EditTags(pieces[-1]) - value = "/".join(pieces) - elif key == "ro.build.description": - pieces = value.split(" ") - #assert len(pieces) == 5 - pieces[-1] = EditTags(pieces[-1]) - value = " ".join(pieces) - elif key == "ro.build.tags": - value = EditTags(value) - elif key == "ro.build.display.id": - # change, eg, "JWR66N dev-keys" to "JWR66N" - value = value.split() - if len(value) > 1 and value[-1].endswith("-keys"): - value.pop() - value = " ".join(value) - line = key + "=" + value - if line != original_line: - print " replace: ", original_line - print " with: ", line - output.append(line) - return "\n".join(output) + "\n" - - -def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info): - try: - keylist = input_tf_zip.read("META/otakeys.txt").split() - except KeyError: - raise common.ExternalError("can't read META/otakeys.txt from input") - - 
extra_recovery_keys = misc_info.get("extra_recovery_keys", None) - if extra_recovery_keys: - extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem" - for k in extra_recovery_keys.split()] - if extra_recovery_keys: - print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys) - else: - extra_recovery_keys = [] - - mapped_keys = [] - for k in keylist: - m = re.match(r"^(.*)\.x509\.pem$", k) - if not m: - raise common.ExternalError( - "can't parse \"%s\" from META/otakeys.txt" % (k,)) - k = m.group(1) - mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem") - - if mapped_keys: - print "using:\n ", "\n ".join(mapped_keys) - print "for OTA package verification" - else: - devkey = misc_info.get("default_system_dev_certificate", - "build/target/product/security/testkey") - mapped_keys.append( - OPTIONS.key_map.get(devkey, devkey) + ".x509.pem") - print "META/otakeys.txt has no keys; using", mapped_keys[0] - - # recovery uses a version of the key that has been slightly - # predigested (by DumpPublicKey.java) and put in res/keys. - # extra_recovery_keys are used only in recovery. - - p = common.Run(["java", "-jar", - os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")] - + mapped_keys + extra_recovery_keys, - stdout=subprocess.PIPE) - new_recovery_keys, _ = p.communicate() - if p.returncode != 0: - raise common.ExternalError("failed to run dumpkeys") - common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", - new_recovery_keys) - - # Save the base64 key representation in the update for key-change - # validations - p = common.Run(["python", "build/tools/getb64key.py", mapped_keys[0]], - stdout=subprocess.PIPE) - data, _ = p.communicate() - if p.returncode == 0: - common.ZipWriteStr(output_tf_zip, "META/releasekey.txt", data) - - # SystemUpdateActivity uses the x509.pem version of the keys, but - # put into a zipfile system/etc/security/otacerts.zip. - # We DO NOT include the extra_recovery_keys (if any) here. 
- - temp_file = cStringIO.StringIO() - certs_zip = zipfile.ZipFile(temp_file, "w") - for k in mapped_keys: - certs_zip.write(k) - certs_zip.close() - common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip", - temp_file.getvalue()) - - return new_recovery_keys - -def ReplaceVerityPublicKey(targetfile_zip, key_path): - print "Replacing verity public key with %s" % key_path - with open(key_path) as f: - data = f.read() - common.ZipWriteStr(targetfile_zip, "BOOT/RAMDISK/verity_key", data) - return data - -def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip, - misc_info, key_path): - print "Replacing verity private key with %s" % key_path - current_key = misc_info["verity_key"] - original_misc_info = targetfile_input_zip.read("META/misc_info.txt") - new_misc_info = original_misc_info.replace(current_key, key_path) - common.ZipWriteStr(targetfile_output_zip, "META/misc_info.txt", new_misc_info) - misc_info["verity_key"] = key_path - -def BuildKeyMap(misc_info, key_mapping_options): - for s, d in key_mapping_options: - if s is None: # -d option - devkey = misc_info.get("default_system_dev_certificate", - "build/target/product/security/testkey") - devkeydir = os.path.dirname(devkey) - - OPTIONS.key_map.update({ - devkeydir + "/testkey": d + "/releasekey", - devkeydir + "/devkey": d + "/releasekey", - devkeydir + "/media": d + "/media", - devkeydir + "/shared": d + "/shared", - devkeydir + "/platform": d + "/platform", - }) - else: - OPTIONS.key_map[s] = d - - -def main(argv): - - key_mapping_options = [] - - def option_handler(o, a): - if o in ("-e", "--extra_apks"): - names, key = a.split("=") - names = names.split(",") - for n in names: - OPTIONS.extra_apks[n] = key - elif o in ("-d", "--default_key_mappings"): - key_mapping_options.append((None, a)) - elif o in ("-k", "--key_mapping"): - key_mapping_options.append(a.split("=", 1)) - elif o in ("-o", "--replace_ota_keys"): - OPTIONS.replace_ota_keys = True - elif o in ("-t", 
"--tag_changes"): - new = [] - for i in a.split(","): - i = i.strip() - if not i or i[0] not in "-+": - raise ValueError("Bad tag change '%s'" % (i,)) - new.append(i[0] + i[1:].strip()) - OPTIONS.tag_changes = tuple(new) - elif o == "--replace_verity_public_key": - OPTIONS.replace_verity_public_key = (True, a) - elif o == "--replace_verity_private_key": - OPTIONS.replace_verity_private_key = (True, a) - else: - return False - return True - - args = common.ParseOptions(argv, __doc__, - extra_opts="e:d:k:ot:", - extra_long_opts=["extra_apks=", - "default_key_mappings=", - "key_mapping=", - "replace_ota_keys", - "tag_changes=", - "replace_verity_public_key=", - "replace_verity_private_key="], - extra_option_handler=option_handler) - - if len(args) != 2: - common.Usage(__doc__) - sys.exit(1) - - input_zip = zipfile.ZipFile(args[0], "r") - output_zip = zipfile.ZipFile(args[1], "w") - - misc_info = common.LoadInfoDict(input_zip) - - BuildKeyMap(misc_info, key_mapping_options) - - apk_key_map = GetApkCerts(input_zip) - CheckAllApksSigned(input_zip, apk_key_map) - - key_passwords = common.GetKeyPasswords(set(apk_key_map.values())) - ProcessTargetFiles(input_zip, output_zip, misc_info, - apk_key_map, key_passwords) - - common.ZipClose(input_zip) - common.ZipClose(output_zip) - - add_img_to_target_files.AddImagesToTargetFiles(args[1]) - - print "done." 
- - -if __name__ == '__main__': - try: - main(sys.argv[1:]) - except common.ExternalError, e: - print - print " ERROR: %s" % (e,) - print - sys.exit(1) diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks new file mode 120000 index 000000000..b5ec59a25 --- /dev/null +++ b/tools/releasetools/sign_target_files_apks @@ -0,0 +1 @@ +sign_target_files_apks.py \ No newline at end of file diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py new file mode 100755 index 000000000..60d62c212 --- /dev/null +++ b/tools/releasetools/sign_target_files_apks.py @@ -0,0 +1,512 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Signs all the APK files in a target-files zipfile, producing a new +target-files zip. + +Usage: sign_target_files_apks [flags] input_target_files output_target_files + + -e (--extra_apks) + Add extra APK name/key pairs as though they appeared in + apkcerts.txt (so mappings specified by -k and -d are applied). + Keys specified in -e override any value for that app contained + in the apkcerts.txt file. Option may be repeated to give + multiple extra packages. + + -k (--key_mapping) + Add a mapping from the key name as specified in apkcerts.txt (the + src_key) to the real key you wish to sign the package with + (dest_key). Option may be repeated to give multiple key + mappings. 
+ + -d (--default_key_mappings) + Set up the following key mappings: + + $devkey/devkey ==> $dir/releasekey + $devkey/testkey ==> $dir/releasekey + $devkey/media ==> $dir/media + $devkey/shared ==> $dir/shared + $devkey/platform ==> $dir/platform + + where $devkey is the directory part of the value of + default_system_dev_certificate from the input target-files's + META/misc_info.txt. (Defaulting to "build/target/product/security" + if the value is not present in misc_info. + + -d and -k options are added to the set of mappings in the order + in which they appear on the command line. + + -o (--replace_ota_keys) + Replace the certificate (public key) used by OTA package + verification with the one specified in the input target_files + zip (in the META/otakeys.txt file). Key remapping (-k and -d) + is performed on this key. + + -t (--tag_changes) <+tag>,<-tag>,... + Comma-separated list of changes to make to the set of tags (in + the last component of the build fingerprint). Prefix each with + '+' or '-' to indicate whether that tag should be added or + removed. Changes are processed in the order they appear. + Default value is "-test-keys,-dev-keys,+release-keys". + +""" + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." 
+ sys.exit(1) + +import base64 +import cStringIO +import copy +import errno +import os +import re +import shutil +import subprocess +import tempfile +import zipfile + +import add_img_to_target_files +import common + +OPTIONS = common.OPTIONS + +OPTIONS.extra_apks = {} +OPTIONS.key_map = {} +OPTIONS.replace_ota_keys = False +OPTIONS.replace_verity_public_key = False +OPTIONS.replace_verity_private_key = False +OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys") + +def GetApkCerts(tf_zip): + certmap = common.ReadApkCerts(tf_zip) + + # apply the key remapping to the contents of the file + for apk, cert in certmap.iteritems(): + certmap[apk] = OPTIONS.key_map.get(cert, cert) + + # apply all the -e options, overriding anything in the file + for apk, cert in OPTIONS.extra_apks.iteritems(): + if not cert: + cert = "PRESIGNED" + certmap[apk] = OPTIONS.key_map.get(cert, cert) + + return certmap + + +def CheckAllApksSigned(input_tf_zip, apk_key_map): + """Check that all the APKs we want to sign have keys specified, and + error out if they don't.""" + unknown_apks = [] + for info in input_tf_zip.infolist(): + if info.filename.endswith(".apk"): + name = os.path.basename(info.filename) + if name not in apk_key_map: + unknown_apks.append(name) + if unknown_apks: + print "ERROR: no key specified for:\n\n ", + print "\n ".join(unknown_apks) + print "\nUse '-e =' to specify a key (which may be an" + print "empty string to not sign this apk)." 
+ sys.exit(1) + + +def SignApk(data, keyname, pw): + unsigned = tempfile.NamedTemporaryFile() + unsigned.write(data) + unsigned.flush() + + signed = tempfile.NamedTemporaryFile() + + common.SignFile(unsigned.name, signed.name, keyname, pw, align=4) + + data = signed.read() + unsigned.close() + signed.close() + + return data + + +def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info, + apk_key_map, key_passwords): + + maxsize = max([len(os.path.basename(i.filename)) + for i in input_tf_zip.infolist() + if i.filename.endswith('.apk')]) + rebuild_recovery = False + + tmpdir = tempfile.mkdtemp() + def write_to_temp(fn, attr, data): + fn = os.path.join(tmpdir, fn) + if fn.endswith("/"): + fn = os.path.join(tmpdir, fn) + os.mkdir(fn) + else: + d = os.path.dirname(fn) + if d and not os.path.exists(d): + os.makedirs(d) + + if attr >> 16 == 0xa1ff: + os.symlink(data, fn) + else: + with open(fn, "wb") as f: + f.write(data) + + for info in input_tf_zip.infolist(): + if info.filename.startswith("IMAGES/"): + continue + + data = input_tf_zip.read(info.filename) + out_info = copy.copy(info) + + if (info.filename == "META/misc_info.txt" and + OPTIONS.replace_verity_private_key): + ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info, + OPTIONS.replace_verity_private_key[1]) + elif (info.filename == "BOOT/RAMDISK/verity_key" and + OPTIONS.replace_verity_public_key): + new_data = ReplaceVerityPublicKey(output_tf_zip, + OPTIONS.replace_verity_public_key[1]) + write_to_temp(info.filename, info.external_attr, new_data) + elif (info.filename.startswith("BOOT/") or + info.filename.startswith("RECOVERY/") or + info.filename.startswith("META/") or + info.filename == "SYSTEM/etc/recovery-resource.dat"): + write_to_temp(info.filename, info.external_attr, data) + + if info.filename.endswith(".apk"): + name = os.path.basename(info.filename) + key = apk_key_map[name] + if key not in common.SPECIAL_CERT_STRINGS: + print " signing: %-*s (%s)" % (maxsize, name, key) + 
signed_data = SignApk(data, key, key_passwords[key]) + common.ZipWriteStr(output_tf_zip, out_info, signed_data) + else: + # an APK we're not supposed to sign. + print "NOT signing: %s" % (name,) + common.ZipWriteStr(output_tf_zip, out_info, data) + elif info.filename in ("SYSTEM/build.prop", + "VENDOR/build.prop", + "BOOT/RAMDISK/default.prop", + "RECOVERY/RAMDISK/default.prop"): + print "rewriting %s:" % (info.filename,) + new_data = RewriteProps(data, misc_info) + common.ZipWriteStr(output_tf_zip, out_info, new_data) + if info.filename in ("BOOT/RAMDISK/default.prop", + "RECOVERY/RAMDISK/default.prop"): + write_to_temp(info.filename, info.external_attr, new_data) + elif info.filename.endswith("mac_permissions.xml"): + print "rewriting %s with new keys." % (info.filename,) + new_data = ReplaceCerts(data) + common.ZipWriteStr(output_tf_zip, out_info, new_data) + elif info.filename in ("SYSTEM/recovery-from-boot.p", + "SYSTEM/bin/install-recovery.sh"): + rebuild_recovery = True + elif (OPTIONS.replace_ota_keys and + info.filename in ("RECOVERY/RAMDISK/res/keys", + "SYSTEM/etc/security/otacerts.zip")): + # don't copy these files if we're regenerating them below + pass + elif (OPTIONS.replace_verity_private_key and + info.filename == "META/misc_info.txt"): + pass + elif (OPTIONS.replace_verity_public_key and + info.filename == "BOOT/RAMDISK/verity_key"): + pass + else: + # a non-APK file; copy it verbatim + common.ZipWriteStr(output_tf_zip, out_info, data) + + if OPTIONS.replace_ota_keys: + new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info) + if new_recovery_keys: + write_to_temp("RECOVERY/RAMDISK/res/keys", 0o755 << 16, new_recovery_keys) + + if rebuild_recovery: + recovery_img = common.GetBootableImage( + "recovery.img", "recovery.img", tmpdir, "RECOVERY", info_dict=misc_info) + boot_img = common.GetBootableImage( + "boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info) + + def output_sink(fn, data): + common.ZipWriteStr(output_tf_zip, 
"SYSTEM/" + fn, data) + + common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img, + info_dict=misc_info) + + shutil.rmtree(tmpdir) + + +def ReplaceCerts(data): + """Given a string of data, replace all occurences of a set + of X509 certs with a newer set of X509 certs and return + the updated data string.""" + for old, new in OPTIONS.key_map.iteritems(): + try: + if OPTIONS.verbose: + print " Replacing %s.x509.pem with %s.x509.pem" % (old, new) + f = open(old + ".x509.pem") + old_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() + f.close() + f = open(new + ".x509.pem") + new_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower() + f.close() + # Only match entire certs. + pattern = "\\b"+old_cert16+"\\b" + (data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE) + if OPTIONS.verbose: + print " Replaced %d occurence(s) of %s.x509.pem with " \ + "%s.x509.pem" % (num, old, new) + except IOError as e: + if e.errno == errno.ENOENT and not OPTIONS.verbose: + continue + + print " Error accessing %s. %s. Skip replacing %s.x509.pem " \ + "with %s.x509.pem." 
% (e.filename, e.strerror, old, new) + + return data + + +def EditTags(tags): + """Given a string containing comma-separated tags, apply the edits + specified in OPTIONS.tag_changes and return the updated string.""" + tags = set(tags.split(",")) + for ch in OPTIONS.tag_changes: + if ch[0] == "-": + tags.discard(ch[1:]) + elif ch[0] == "+": + tags.add(ch[1:]) + return ",".join(sorted(tags)) + + +def RewriteProps(data, misc_info): + output = [] + for line in data.split("\n"): + line = line.strip() + original_line = line + if line and line[0] != '#' and "=" in line: + key, value = line.split("=", 1) + if (key in ("ro.build.fingerprint", "ro.vendor.build.fingerprint") + and misc_info.get("oem_fingerprint_properties") is None): + pieces = value.split("/") + pieces[-1] = EditTags(pieces[-1]) + value = "/".join(pieces) + elif (key in ("ro.build.thumbprint", "ro.vendor.build.thumbprint") + and misc_info.get("oem_fingerprint_properties") is not None): + pieces = value.split("/") + pieces[-1] = EditTags(pieces[-1]) + value = "/".join(pieces) + elif key == "ro.bootimage.build.fingerprint": + pieces = value.split("/") + pieces[-1] = EditTags(pieces[-1]) + value = "/".join(pieces) + elif key == "ro.build.description": + pieces = value.split(" ") + assert len(pieces) == 5 + pieces[-1] = EditTags(pieces[-1]) + value = " ".join(pieces) + elif key == "ro.build.tags": + value = EditTags(value) + elif key == "ro.build.display.id": + # change, eg, "JWR66N dev-keys" to "JWR66N" + value = value.split() + if len(value) > 1 and value[-1].endswith("-keys"): + value.pop() + value = " ".join(value) + line = key + "=" + value + if line != original_line: + print " replace: ", original_line + print " with: ", line + output.append(line) + return "\n".join(output) + "\n" + + +def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info): + try: + keylist = input_tf_zip.read("META/otakeys.txt").split() + except KeyError: + raise common.ExternalError("can't read META/otakeys.txt from input") + + 
extra_recovery_keys = misc_info.get("extra_recovery_keys", None) + if extra_recovery_keys: + extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem" + for k in extra_recovery_keys.split()] + if extra_recovery_keys: + print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys) + else: + extra_recovery_keys = [] + + mapped_keys = [] + for k in keylist: + m = re.match(r"^(.*)\.x509\.pem$", k) + if not m: + raise common.ExternalError( + "can't parse \"%s\" from META/otakeys.txt" % (k,)) + k = m.group(1) + mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem") + + if mapped_keys: + print "using:\n ", "\n ".join(mapped_keys) + print "for OTA package verification" + else: + devkey = misc_info.get("default_system_dev_certificate", + "build/target/product/security/testkey") + mapped_keys.append( + OPTIONS.key_map.get(devkey, devkey) + ".x509.pem") + print "META/otakeys.txt has no keys; using", mapped_keys[0] + + # recovery uses a version of the key that has been slightly + # predigested (by DumpPublicKey.java) and put in res/keys. + # extra_recovery_keys are used only in recovery. + + p = common.Run(["java", "-jar", + os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")] + + mapped_keys + extra_recovery_keys, + stdout=subprocess.PIPE) + new_recovery_keys, _ = p.communicate() + if p.returncode != 0: + raise common.ExternalError("failed to run dumpkeys") + common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", + new_recovery_keys) + + # SystemUpdateActivity uses the x509.pem version of the keys, but + # put into a zipfile system/etc/security/otacerts.zip. + # We DO NOT include the extra_recovery_keys (if any) here. 
+ + temp_file = cStringIO.StringIO() + certs_zip = zipfile.ZipFile(temp_file, "w") + for k in mapped_keys: + certs_zip.write(k) + certs_zip.close() + common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip", + temp_file.getvalue()) + + return new_recovery_keys + +def ReplaceVerityPublicKey(targetfile_zip, key_path): + print "Replacing verity public key with %s" % key_path + with open(key_path) as f: + data = f.read() + common.ZipWriteStr(targetfile_zip, "BOOT/RAMDISK/verity_key", data) + return data + +def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip, + misc_info, key_path): + print "Replacing verity private key with %s" % key_path + current_key = misc_info["verity_key"] + original_misc_info = targetfile_input_zip.read("META/misc_info.txt") + new_misc_info = original_misc_info.replace(current_key, key_path) + common.ZipWriteStr(targetfile_output_zip, "META/misc_info.txt", new_misc_info) + misc_info["verity_key"] = key_path + +def BuildKeyMap(misc_info, key_mapping_options): + for s, d in key_mapping_options: + if s is None: # -d option + devkey = misc_info.get("default_system_dev_certificate", + "build/target/product/security/testkey") + devkeydir = os.path.dirname(devkey) + + OPTIONS.key_map.update({ + devkeydir + "/testkey": d + "/releasekey", + devkeydir + "/devkey": d + "/releasekey", + devkeydir + "/media": d + "/media", + devkeydir + "/shared": d + "/shared", + devkeydir + "/platform": d + "/platform", + }) + else: + OPTIONS.key_map[s] = d + + +def main(argv): + + key_mapping_options = [] + + def option_handler(o, a): + if o in ("-e", "--extra_apks"): + names, key = a.split("=") + names = names.split(",") + for n in names: + OPTIONS.extra_apks[n] = key + elif o in ("-d", "--default_key_mappings"): + key_mapping_options.append((None, a)) + elif o in ("-k", "--key_mapping"): + key_mapping_options.append(a.split("=", 1)) + elif o in ("-o", "--replace_ota_keys"): + OPTIONS.replace_ota_keys = True + elif o in ("-t", 
"--tag_changes"): + new = [] + for i in a.split(","): + i = i.strip() + if not i or i[0] not in "-+": + raise ValueError("Bad tag change '%s'" % (i,)) + new.append(i[0] + i[1:].strip()) + OPTIONS.tag_changes = tuple(new) + elif o == "--replace_verity_public_key": + OPTIONS.replace_verity_public_key = (True, a) + elif o == "--replace_verity_private_key": + OPTIONS.replace_verity_private_key = (True, a) + else: + return False + return True + + args = common.ParseOptions(argv, __doc__, + extra_opts="e:d:k:ot:", + extra_long_opts=["extra_apks=", + "default_key_mappings=", + "key_mapping=", + "replace_ota_keys", + "tag_changes=", + "replace_verity_public_key=", + "replace_verity_private_key="], + extra_option_handler=option_handler) + + if len(args) != 2: + common.Usage(__doc__) + sys.exit(1) + + input_zip = zipfile.ZipFile(args[0], "r") + output_zip = zipfile.ZipFile(args[1], "w") + + misc_info = common.LoadInfoDict(input_zip) + + BuildKeyMap(misc_info, key_mapping_options) + + apk_key_map = GetApkCerts(input_zip) + CheckAllApksSigned(input_zip, apk_key_map) + + key_passwords = common.GetKeyPasswords(set(apk_key_map.values())) + ProcessTargetFiles(input_zip, output_zip, misc_info, + apk_key_map, key_passwords) + + common.ZipClose(input_zip) + common.ZipClose(output_zip) + + add_img_to_target_files.AddImagesToTargetFiles(args[1]) + + print "done." + + +if __name__ == '__main__': + try: + main(sys.argv[1:]) + except common.ExternalError, e: + print + print " ERROR: %s" % (e,) + print + sys.exit(1) diff --git a/tools/releasetools/sign_target_files_apks.tmp b/tools/releasetools/sign_target_files_apks.tmp deleted file mode 120000 index b5ec59a25..000000000 --- a/tools/releasetools/sign_target_files_apks.tmp +++ /dev/null @@ -1 +0,0 @@ -sign_target_files_apks.py \ No newline at end of file From 69893400ec0e515adaed33577cfa92e708aae7e9 Mon Sep 17 00:00:00 2001 From: Adnan Begovic Date: Wed, 7 Oct 2015 10:53:09 -0700 Subject: [PATCH 179/309] build: Fix extreneous endif. 
Bad merge of d22599be55f8a229ea47cd9329da4e3b23245078 introduced compile issues. Change-Id: Ibeefc4904ae52938612819347852d7c4c5a0e82d --- core/Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/core/Makefile b/core/Makefile index 2187c1e5b..a09d52fb1 100644 --- a/core/Makefile +++ b/core/Makefile @@ -975,8 +975,6 @@ $(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ $(RECOVERY_INSTALL_OTA_KEYS) $(call build-recoveryimage-target, $@) -endif # BOARD_CUSTOM_BOOTIMG_MK - recovery_patch_path := $(call intermediates-dir-for,PACKAGING,recovery_patch) ota_temp_root := $(PRODUCT_OUT)/ota_temp $(RECOVERY_PATCH_INSTALL): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION) From fef61322e663e5186259c5f7d83ab434555c9232 Mon Sep 17 00:00:00 2001 From: Dan Pasanen Date: Wed, 7 Oct 2015 13:20:18 -0500 Subject: [PATCH 180/309] build: remove extreneous backwhack Change-Id: Iac7b8d16008dcf4ad1e8d425340df032f8405746 --- core/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/Makefile b/core/Makefile index a09d52fb1..e2e21123b 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1519,7 +1519,7 @@ built_ota_tools := \ $(call intermediates-dir-for,EXECUTABLES,applypatch,,,$(TARGET_PREFER_32_BIT))/applypatch \ $(call intermediates-dir-for,EXECUTABLES,applypatch_static,,,$(TARGET_PREFER_32_BIT))/applypatch_static \ $(call intermediates-dir-for,EXECUTABLES,check_prereq,,,$(TARGET_PREFER_32_BIT))/check_prereq \ - $(call intermediates-dir-for,EXECUTABLES,sqlite3,,,$(TARGET_PREFER_32_BIT))/sqlite3 \ + $(call intermediates-dir-for,EXECUTABLES,sqlite3,,,$(TARGET_PREFER_32_BIT))/sqlite3 ifeq ($(TARGET_ARCH),arm64) built_ota_tools += $(call intermediates-dir-for,EXECUTABLES,updater,,,32)/updater From 39753a21ae3cbee017bf8b26ea93e5701a16dc68 Mon Sep 17 00:00:00 2001 From: Brian Chu Date: Tue, 6 Oct 2015 23:49:43 -0700 Subject: [PATCH 181/309] Fix OSX support for new CLI Tools (starting with 7.1) To build on OSX with late-2015 CLI Tools, 
the build system must include C++ headers from a new location. Mid-2015 CLI Tools and older can still be used, so the older location will also be kept. Change-Id: I422d28cb41ab57fcc816538a822df8aecdf7d9d6 --- core/combo/HOST_darwin-x86.mk | 6 ++++++ core/combo/HOST_darwin-x86_64.mk | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/core/combo/HOST_darwin-x86.mk b/core/combo/HOST_darwin-x86.mk index 26e206925..5992852ae 100644 --- a/core/combo/HOST_darwin-x86.mk +++ b/core/combo/HOST_darwin-x86.mk @@ -43,7 +43,13 @@ $(combo_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG := $($(combo_2nd_arch_prefix)HO $(combo_2nd_arch_prefix)HOST_AR := $(AR) $(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -isysroot $(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version) -DMACOSX_DEPLOYMENT_TARGET=$(mac_sdk_version) +ifeq (,$(wildcard $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1)) +# libc++ header locations for XCode CLT 7.1+ +$(combo_2nd_arch_prefix)HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/usr/include/c++/v1 +else +# libc++ header locations for pre-XCode CLT 7.1+ $(combo_2nd_arch_prefix)HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1 +endif $(combo_2nd_arch_prefix)HOST_GLOBAL_LDFLAGS += -isysroot $(mac_sdk_root) -Wl,-syslibroot,$(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version) $(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -fPIC -funwind-tables diff --git a/core/combo/HOST_darwin-x86_64.mk b/core/combo/HOST_darwin-x86_64.mk index 0efa78fe5..324b3fd45 100644 --- a/core/combo/HOST_darwin-x86_64.mk +++ b/core/combo/HOST_darwin-x86_64.mk @@ -43,7 +43,13 @@ HOST_TOOLCHAIN_FOR_CLANG := $(HOST_TOOLCHAIN_ROOT) HOST_AR := $(AR) HOST_GLOBAL_CFLAGS += -isysroot $(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version) -DMACOSX_DEPLOYMENT_TARGET=$(mac_sdk_version) +ifeq (,$(wildcard $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1)) +# libc++ header locations for XCode CLT 7.1+ 
+HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/usr/include/c++/v1 +else +# libc++ header locations for pre-XCode CLT 7.1+ HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1 +endif HOST_GLOBAL_LDFLAGS += -isysroot $(mac_sdk_root) -Wl,-syslibroot,$(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version) HOST_GLOBAL_CFLAGS += -fPIC -funwind-tables From 850ca0228115580aa92a53bff8c214c23935e5f4 Mon Sep 17 00:00:00 2001 From: Brandon McAnsh Date: Wed, 7 Oct 2015 20:42:38 -0400 Subject: [PATCH 182/309] core: Add recovery image target Change-Id: I436ff8e91e9549fd4b61a2bd2796df9e5b1031dd Signed-off-by: Brandon McAnsh --- core/Makefile | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/core/Makefile b/core/Makefile index e2e21123b..c921da773 100644 --- a/core/Makefile +++ b/core/Makefile @@ -975,6 +975,18 @@ $(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ $(RECOVERY_INSTALL_OTA_KEYS) $(call build-recoveryimage-target, $@) +ifndef BOARD_CUSTOM_BOOTIMG_MK +$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTIMG) $(recovery_ramdisk) \ + $(recovery_kernel) + $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@ +ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY)) + $(BOOT_SIGNER) /recovery $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $@ +endif + $(hide) $(call assert-max-image-size,$@,$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) + @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} + +endif # BOARD_CUSTOM_BOOTIMG_MK + recovery_patch_path := $(call intermediates-dir-for,PACKAGING,recovery_patch) ota_temp_root := $(PRODUCT_OUT)/ota_temp $(RECOVERY_PATCH_INSTALL): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION) From 3b51b49fe2f7c85e96de68d25787e19fdb43df06 Mon Sep 17 00:00:00 2001 From: Brandon McAnsh Date: Fri, 2 Oct 2015 14:05:52 -0400 Subject: [PATCH 
183/309] Add Xcode 7 (10.11) SDK support to mac supported list * Allows lunch and a build to start Change-Id: I9cd4b851cf05390b80e1b8d292643e59e636fe3c Signed-off-by: Brandon McAnsh --- core/combo/mac_version.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/combo/mac_version.mk b/core/combo/mac_version.mk index d18e4c590..e9f06968c 100644 --- a/core/combo/mac_version.mk +++ b/core/combo/mac_version.mk @@ -11,7 +11,7 @@ build_mac_version := $(shell sw_vers -productVersion) # Caution: Do not add 10.10 to this list until the prebuilts/darwin-x86 toolchains are updated. # In the meantime, setting mac_sdk_version to 10.9 works on Yosemite (verified on 10.10.1). -mac_sdk_versions_supported := 10.6 10.7 10.8 10.9 +mac_sdk_versions_supported := 10.6 10.7 10.8 10.9 10.11 ifneq ($(strip $(MAC_SDK_VERSION)),) mac_sdk_version := $(MAC_SDK_VERSION) ifeq ($(filter $(mac_sdk_version),$(mac_sdk_versions_supported)),) From 916be2f85cfe8ea890ab20ed32a015f5442675a5 Mon Sep 17 00:00:00 2001 From: Stephen Bird Date: Thu, 8 Oct 2015 02:05:06 -0700 Subject: [PATCH 184/309] Fix syntax issues and recovery generation Change-Id: I2394ffb16fa06421c5f0cc5f5ee77db72d3399ba --- core/Makefile | 12 ++++++------ tools/releasetools/common.py | 2 +- tools/releasetools/ota_from_target_files | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/core/Makefile b/core/Makefile index c921da773..18c520c76 100644 --- a/core/Makefile +++ b/core/Makefile @@ -939,6 +939,7 @@ define build-recoveryimage-target $(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/* $(hide) cp -rf $(recovery_resources_common)/* $(TARGET_RECOVERY_ROOT_OUT)/res $(hide) cp -f $(recovery_font) $(TARGET_RECOVERY_ROOT_OUT)/res/images/font.png + $(hide) $(foreach item,$(recovery_root_private), \ cp -rf $(item) $(TARGET_RECOVERY_OUT)/) $(hide) $(foreach item,$(recovery_resources_private), \ cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/) @@ -947,12 +948,7 @@ define build-recoveryimage-target $(hide) cp 
$(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys $(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \ > $(TARGET_RECOVERY_ROOT_OUT)/default.prop - $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) - -$(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) - @echo -e ${CL_CYN}"----- Making uncompressed recovery ramdisk ------"${CL_RST} - $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $@ - + $(hide) $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) @@ -964,6 +960,10 @@ $(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} endef +$(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) + @echo -e ${CL_CYN}"----- Making uncompressed recovery ramdisk ------"${CL_RST} + $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $@ + $(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ $(INSTALLED_RAMDISK_TARGET) \ $(INSTALLED_BOOTIMAGE_TARGET) \ diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 14a5f409b..1753ec046 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -1383,7 +1383,7 @@ def _CheckFirstBlock(self, script): "squashfs": "EMMC", "ext2": "EMMC", "ext3": "EMMC", - "vfat": "EMMC" } + "vfat": "EMMC" } def GetTypeAndDevice(mount_point, info): diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files index 17e5e06e5..c8fa8553d 100755 --- a/tools/releasetools/ota_from_target_files +++ b/tools/releasetools/ota_from_target_files @@ -1639,7 +1639,7 @@ def main(argv): 
"no_fallback_to_full", "backup=", "override_device=", - "override_prop="], + "override_prop=" ], extra_option_handler=option_handler) if len(args) != 2: From 1df10d6d873e9163df3f300021708b0e9957c558 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Wed, 2 Sep 2015 11:01:31 -0700 Subject: [PATCH 185/309] build: Allow QC platforms to share/customize HAL variants * Allow devices to override HAL variant using QCOM_HARDWARE_VARIANT * 8909 and 8916 share HALs by default * 8610, 8226, 8974 share HALs by default * 8992, 8994 share HALs by default Change-Id: Idfbdc405a35c5ba518fc71c8b691fc177ddf9068 (cherry picked from commit 338b399a2976993f0758c2f4e61620e987f4cb19) --- core/qcom_target.mk | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 825ddbb1b..23e086e29 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -36,15 +36,31 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) 2ND_CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags) 2ND_CLANG_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags) -$(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(TARGET_BOARD_PLATFORM)) + ifeq ($(QCOM_HARDWARE_VARIANT),) + ifneq ($(filter msm8610 msm8226 msm8974,$(TARGET_BOARD_PLATFORM)),) + QCOM_HARDWARE_VARIANT := msm8974 + else + ifneq ($(filter msm8909 msm8916,$(TARGET_BOARD_PLATFORM)),) + QCOM_HARDWARE_VARIANT := msm8916 + else + ifneq ($(filter msm8992 msm8994,$(TARGET_BOARD_PLATFORM)),) + QCOM_HARDWARE_VARIANT := msm8994 + else + QCOM_HARDWARE_VARIANT := $(TARGET_BOARD_PLATFORM) + endif + endif + endif + endif + +$(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(QCOM_HARDWARE_VARIANT)) ifeq ($(USE_DEVICE_SPECIFIC_CAMERA),true) $(call project-set-path,qcom-camera,$(TARGET_DEVICE_DIR)/camera) else $(call qcom-set-path-variant,CAMERA,camera) endif -$(call project-set-path,qcom-display,hardware/qcom/display-caf/$(TARGET_BOARD_PLATFORM)) +$(call 
project-set-path,qcom-display,hardware/qcom/display-caf/$(QCOM_HARDWARE_VARIANT)) $(call qcom-set-path-variant,GPS,gps) -$(call project-set-path,qcom-media,hardware/qcom/media-caf/$(TARGET_BOARD_PLATFORM)) +$(call project-set-path,qcom-media,hardware/qcom/media-caf/$(QCOM_HARDWARE_VARIANT)) $(call qcom-set-path-variant,SENSORS,sensors) $(call ril-set-path-variant,ril) else From 19a3cfa84a4d6695fb15b165aa1167edf6cdb6a8 Mon Sep 17 00:00:00 2001 From: Scott Mertz Date: Wed, 29 Jul 2015 14:39:13 -0700 Subject: [PATCH 186/309] build: kernel: arm64 uses Image.gz as the target Change-Id: Icc194d2a16f9ffcf5803226fc1f2a5b091b4d861 (cherry picked from commit 24ddce302503e9408c47bd571a0d510546dcfd1a) --- core/tasks/kernel.mk | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 764532cfa..41886e824 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -55,7 +55,11 @@ else ifeq ($(TARGET_USES_UNCOMPRESSED_KERNEL),true) TARGET_PREBUILT_INT_KERNEL_TYPE := Image else - TARGET_PREBUILT_INT_KERNEL_TYPE := zImage + ifeq ($(TARGET_KERNEL_ARCH),arm64) + TARGET_PREBUILT_INT_KERNEL_TYPE := Image.gz + else + TARGET_PREBUILT_INT_KERNEL_TYPE := zImage + endif endif endif From 77a91fd38bc9995cb103fca04c69c205fe69fa34 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Mon, 11 May 2015 02:57:35 +0300 Subject: [PATCH 187/309] kernel: Fix kernelconfig build * Copy the generated defconfig to the correct directory Change-Id: Ia8e259946d67501675dd5a3433165d54df6c13c5 --- core/tasks/kernel.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 41886e824..78661c420 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -256,7 +256,7 @@ kernelconfig: $(KERNEL_OUT) $(KERNEL_CONFIG) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) menuconfig env KCONFIG_NOTIMESTAMP=true \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) 
ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) savedefconfig - cp $(KERNEL_OUT)/defconfig kernel/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG) + cp $(KERNEL_OUT)/defconfig $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG) endif # FULL_KERNEL_BUILD From 59d142a6d83b58b9fb1a4dddc7216b009ea8df30 Mon Sep 17 00:00:00 2001 From: Adnan Begovic Date: Fri, 8 May 2015 14:42:25 -0700 Subject: [PATCH 188/309] build/core: Create means of ignoring subdir layer for packages. An external resource package that acts as a secondary framework resource should be presented in system/framework similarily to the framework-res module. Change-Id: Ie4110a184cd7262035110a6a04bb7ea91e7a42b0 --- core/base_rules.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/base_rules.mk b/core/base_rules.mk index 0f2accc16..202398a95 100644 --- a/core/base_rules.mk +++ b/core/base_rules.mk @@ -184,7 +184,7 @@ ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE)) # Apk and its attachments reside in its own subdir. ifeq ($(LOCAL_MODULE_CLASS),APPS) # framework-res.apk doesn't like the additional layer. - ifneq ($(LOCAL_NO_STANDARD_LIBRARIES),true) + ifeq ($(filter true,$(LOCAL_NO_STANDARD_LIBRARIES) $(LOCAL_IGNORE_SUBDIR)),) my_module_path := $(my_module_path)/$(LOCAL_MODULE) endif endif From f86e001a5d53d859d9e69adc1cb4e906640ee24b Mon Sep 17 00:00:00 2001 From: Adnan Begovic Date: Wed, 13 May 2015 00:52:31 -0700 Subject: [PATCH 189/309] build: Add IGNORE_SUBDIR to clear vars. Change-Id: Icc539d6d4e0a2d5a025416841dc05fe6bcb6199f --- core/clear_vars.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/clear_vars.mk b/core/clear_vars.mk index 076fb78e9..7847ae927 100644 --- a/core/clear_vars.mk +++ b/core/clear_vars.mk @@ -293,6 +293,8 @@ LOCAL_MODULE_STEM_64:= LOCAL_CLANG_32:= LOCAL_CLANG_64:= +LOCAL_IGNORE_SUBDIR:= + # Trim MAKEFILE_LIST so that $(call my-dir) doesn't need to # iterate over thousands of entries every time. 
# Leave the current makefile to make sure we don't break anything From 3b3c7267750ba97f15bd880faf79438d1dd83e44 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Sun, 28 Sep 2014 01:55:24 +0100 Subject: [PATCH 190/309] Add support for mediatek platforms This includes optional support for building the kernel with mediatek's build system, which is usually included with OEM source drops for this platform. (enabled by BOARD_USES_MTK_KERNELBUILD:=true) Change-Id: I69fb50aa17d9c171bf8a7c220a0707c4bc570733 --- core/main.mk | 3 +++ core/mtk_utils.mk | 5 +++++ core/tasks/kernel.mk | 13 +++++++++++++ 3 files changed, 21 insertions(+) create mode 100755 core/mtk_utils.mk diff --git a/core/main.mk b/core/main.mk index 9e51541d5..8643ee3d4 100644 --- a/core/main.mk +++ b/core/main.mk @@ -105,6 +105,9 @@ include $(BUILD_SYSTEM)/cleanbuild.mk # Bring in Qualcomm helper macros include $(BUILD_SYSTEM)/qcom_utils.mk +# Bring in Mediatek helper macros too +include $(BUILD_SYSTEM)/mtk_utils.mk + # Include the google-specific config -include vendor/google/build/config.mk diff --git a/core/mtk_utils.mk b/core/mtk_utils.mk new file mode 100755 index 000000000..48fd66054 --- /dev/null +++ b/core/mtk_utils.mk @@ -0,0 +1,5 @@ +# Board platforms lists to be used for +# TARGET_BOARD_PLATFORM specific featurization +MTK_BOARD_PLATFORMS := mt6592 +MTK_BOARD_PLATFORMS += mt6582 +MTK_BOARD_PLATFORMS += mt6572 diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 78661c420..6c00bf786 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -144,6 +144,19 @@ else endif endif +ifeq ($(BOARD_HAS_MTK_HARDWARE),true) + ifeq ($(BOARD_USES_MTK_KERNELBUILD),true) + include $(CLEAR_VARS) + $(shell rm -f $(TARGET_PREBUILT_INT_KERNEL)) + FULL_KERNEL_BUILD := false + PROJECT_NAME := $(TARGET_KERNEL_CONFIG) +$(TARGET_PREBUILT_INT_KERNEL): + cd $(TARGET_KERNEL_SOURCE) && env -i PATH=$(PATH) ./makeMtk -t -o=OUT_DIR=$(OUT_DIR),TARGET_BUILD_VARIANT=$(TARGET_BUILD_VARIANT) $(PROJECT_NAME) r k + 
-cd $(TARGET_KERNEL_SOURCE) && git clean -fd + + endif +endif + ifeq ($(FULL_KERNEL_BUILD),true) KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr From 334e3bc2cca607d7441e7608d9f9c8a61555c338 Mon Sep 17 00:00:00 2001 From: Scott Mertz Date: Mon, 20 Apr 2015 10:40:21 -0700 Subject: [PATCH 191/309] Colorize more target file output Change-Id: I25aee19e9a5a06eeeaa4040803c064a140a86778 --- core/dex_preopt_odex_install.mk | 2 +- core/prebuilt_internal.mk | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk index 73c814667..e6b4f3cfa 100644 --- a/core/dex_preopt_odex_install.mk +++ b/core/dex_preopt_odex_install.mk @@ -108,7 +108,7 @@ $(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS) $(installed_odex) : $(dir $(LOCAL_INSTALLED_MODULE))%$(notdir $(word 1,$(installed_odex))) \ : $(dir $(LOCAL_BUILT_MODULE))%$(notdir $(word 1,$(built_odex))) \ | $(ACP) - @echo "Install: $@" + @echo -e ${CL_CYN}"Install: $@"${CL_RST} $(copy-file-to-target) endif diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk index 04063537f..585ead20f 100644 --- a/core/prebuilt_internal.mk +++ b/core/prebuilt_internal.mk @@ -253,7 +253,7 @@ $(built_apk_splits) : $(built_module_path)/%.apk : $(my_src_dir)/%.apk | $(ACP) # Rules to install the split apks. $(installed_apk_splits) : $(my_module_path)/%.apk : $(built_module_path)/%.apk | $(ACP) - @echo "Install: $@" + @echo -e ${CL_CYN}"Install: $@"${CL_RST} $(copy-file-to-new-target) # Register the additional built and installed files. 
From b23c1830679ae240e5a7cc1873245cfa66ca8bca Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Tue, 21 Apr 2015 11:50:36 -0700 Subject: [PATCH 192/309] build: Add a couple more custom pathmap variables Change-Id: Ie44b9844c6d82dd73d1ea1f16b565f8bb06652c1 --- core/qcom_target.mk | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 23e086e29..6922b46db 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -7,6 +7,12 @@ endef define ril-set-path-variant $(call project-set-path-variant,ril,TARGET_RIL_VARIANT,hardware/$(1)) endef +define gps-hal-set-path-variant +$(call project-set-path-variant,gps-hal,TARGET_GPS_HAL_PATH,$(1)) +endef +define loc-api-set-path-variant +$(call project-set-path-variant,loc-api,TARGET_LOC_API_PATH,$(1)) +endef ifeq ($(BOARD_USES_QCOM_HARDWARE),true) @@ -63,6 +69,8 @@ $(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media-caf/$(QCOM_HARDWARE_VARIANT)) $(call qcom-set-path-variant,SENSORS,sensors) $(call ril-set-path-variant,ril) +$(call loc-api-set-path-variant,vendor/qcom/opensource/location) +$(call gps-hal-set-path-variant,hardware/qcom/gps) else $(call project-set-path,qcom-audio,hardware/qcom/audio/default) $(call qcom-set-path-variant,CAMERA,camera) @@ -71,4 +79,6 @@ $(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media/default) $(call qcom-set-path-variant,SENSORS,sensors) $(call ril-set-path-variant,ril) +$(call loc-api-set-path-variant,vendor/qcom/opensource/location) +$(call gps-hal-set-path-variant,hardware/qcom/gps) endif From 6e14d89690c9de4d640abb0b05e83b31fbd659bf Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Mon, 2 Jun 2014 16:49:59 -0700 Subject: [PATCH 193/309] Generate extra userdata partition if needed * To support variants of devices which may come in 16/32/64GB variants. 
Change-Id: I74c32d8316f0450a1445fe95a95e5cabb7a9dd1b (cherry picked from commit aae837f9cbcff739cb660016a6d1abd7ef4663db) --- core/Makefile | 2 + core/generate_extra_images.mk | 29 ++++++++++++++ tools/releasetools/add_img_to_target_files.py | 38 +++++++++++++++++++ tools/releasetools/build_image.py | 13 ++++++- tools/releasetools/common.py | 4 +- 5 files changed, 82 insertions(+), 4 deletions(-) diff --git a/core/Makefile b/core/Makefile index 18c520c76..665adb510 100644 --- a/core/Makefile +++ b/core/Makefile @@ -777,6 +777,8 @@ $(if $(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR),$(hide) echo "system_squashfs_comp $(if $(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR_OPT),$(hide) echo "system_squashfs_compressor_opt=$(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR_OPT)" >> $(1)) $(if $(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "userdata_fs_type=$(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE)" >> $(1)) $(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(hide) echo "userdata_size=$(BOARD_USERDATAIMAGE_PARTITION_SIZE)" >> $(1)) +$(if $(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE),$(hide) echo "userdataextra_size=$(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE)" >> $(1)) +$(if $(BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME),$(hide) echo "userdataextra_name=$(BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME)" >> $(1)) $(if $(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "cache_fs_type=$(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE)" >> $(1)) $(if $(BOARD_CACHEIMAGE_PARTITION_SIZE),$(hide) echo "cache_size=$(BOARD_CACHEIMAGE_PARTITION_SIZE)" >> $(1)) $(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_fs_type=$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE)" >> $(1)) diff --git a/core/generate_extra_images.mk b/core/generate_extra_images.mk index 8cd18fd0d..c25366856 100644 --- a/core/generate_extra_images.mk +++ b/core/generate_extra_images.mk @@ -139,6 +139,35 @@ ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_1G_USERDATAIMAGE_TARGET) endif +#---------------------------------------------------------------------- +# 
Generate extra userdata images (for variants with multiple mmc sizes) +#---------------------------------------------------------------------- +ifneq ($(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE),) + +ifndef BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME + BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME := extra +endif + +BUILT_USERDATAEXTRAIMAGE_TARGET := $(PRODUCT_OUT)/userdata_$(BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME).img + +define build-userdataextraimage-target + $(call pretty,"Target EXTRA userdata fs image: $(INSTALLED_USERDATAEXTRAIMAGE_TARGET)") + @mkdir -p $(TARGET_OUT_DATA) + $(hide) $(MKEXTUSERIMG) -s $(TARGET_OUT_DATA) $@ ext4 data $(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE) + $(hide) chmod a+r $@ + $(hide) $(call assert-max-image-size,$@,$(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE),yaffs) +endef + +INSTALLED_USERDATAEXTRAIMAGE_TARGET := $(BUILT_USERDATAEXTRAIMAGE_TARGET) +$(INSTALLED_USERDATAEXTRAIMAGE_TARGET): $(INSTALLED_USERDATAIMAGE_TARGET) + $(build-userdataextraimage-target) + +ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_USERDATAEXTRAIMAGE_TARGET) +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_USERDATAEXTRAIMAGE_TARGET) + +endif + + #---------------------------------------------------------------------- # Generate NAND images #---------------------------------------------------------------------- diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py index f2bf1e177..41f244934 100755 --- a/tools/releasetools/add_img_to_target_files.py +++ b/tools/releasetools/add_img_to_target_files.py @@ -207,6 +207,42 @@ def AddUserdata(output_zip, prefix="IMAGES/"): os.rmdir(temp_dir) +def AddUserdataExtra(output_zip): + """Create extra userdata image and store it in output_zip.""" + + image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, + "data_extra") + # If no userdataextra_size is provided for extfs, skip userdata_extra.img. 
+ if (image_props.get("fs_type", "").startswith("ext") and + not image_props.get("partition_size")): + return + + extra_name = image_props.get("partition_name", "extra") + + print "creating userdata_%s.img..." % extra_name + + # The name of the directory it is making an image out of matters to + # mkyaffs2image. So we create a temp dir, and within it we create an + # empty dir named "data", and build the image from that. + temp_dir = tempfile.mkdtemp() + user_dir = os.path.join(temp_dir, "data") + os.mkdir(user_dir) + img = tempfile.NamedTemporaryFile() + + fstab = OPTIONS.info_dict["fstab"] + if fstab: + image_props["fs_type" ] = fstab["/data"].fs_type + succ = build_image.BuildImage(user_dir, image_props, img.name) + assert succ, "build userdata_%s.img image failed" % extra_name + + # Disable size check since this fetches original data partition size + #common.CheckSize(img.name, "userdata_extra.img", OPTIONS.info_dict) + output_zip.write(img.name, "userdata_%s.img" % extra_name) + img.close() + os.rmdir(user_dir) + os.rmdir(temp_dir) + + def AddCache(output_zip, prefix="IMAGES/"): """Create an empty cache image and store it in output_zip.""" @@ -312,6 +348,8 @@ def banner(s): AddVendor(output_zip) banner("userdata") AddUserdata(output_zip) + banner("extrauserdata") + AddUserdataExtra(output_zip) banner("cache") AddCache(output_zip) diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py index 2917f11c0..b83379cf1 100755 --- a/tools/releasetools/build_image.py +++ b/tools/releasetools/build_image.py @@ -264,8 +264,12 @@ def BuildImage(in_dir, prop_dict, out_file, target_out=None): if "extfs_sparse_flag" in prop_dict: build_command.append(prop_dict["extfs_sparse_flag"]) #run_fsck = True - build_command.extend([in_dir, out_file, fs_type, - prop_dict["mount_point"]]) + if "is_userdataextra" in prop_dict: + build_command.extend([in_dir, out_file, fs_type, + "data"]) + else: + build_command.extend([in_dir, out_file, fs_type, + 
prop_dict["mount_point"]]) build_command.append(prop_dict["partition_size"]) if "journal_size" in prop_dict: build_command.extend(["-j", prop_dict["journal_size"]]) @@ -440,6 +444,11 @@ def copy_prop(src_p, dest_p): copy_prop("fs_type", "fs_type") copy_prop("userdata_fs_type", "fs_type") copy_prop("userdata_size", "partition_size") + elif mount_point == "data_extra": + copy_prop("fs_type", "fs_type") + copy_prop("userdataextra_size", "partition_size") + copy_prop("userdataextra_name", "partition_name") + d["is_userdataextra"] = True elif mount_point == "cache": copy_prop("cache_fs_type", "fs_type") copy_prop("cache_size", "partition_size") diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 1753ec046..9f2835a89 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -616,8 +616,8 @@ def CheckSize(data, target, info_dict): fs_type = None limit = None if info_dict["fstab"]: - if mount_point == "/userdata": - mount_point = "/data" + if mount_point == "/userdata_extra": mount_point = "/data" + if mount_point == "/userdata": mount_point = "/data" p = info_dict["fstab"][mount_point] fs_type = p.fs_type device = p.device From 92c2f7abb3ce0a08c23cd2ffb816be0d4668f489 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Thu, 30 Apr 2015 22:19:48 +0100 Subject: [PATCH 194/309] envsetup: add mk_timer this wraps around any method call, mainly for 'make'. 
mka needs this on linux, since ionice and schedtool don't respect functions Change-Id: If8cdd235ed9eba377dd90ab8b12e93036a377ea5 --- envsetup.sh | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index d7c9e0f8a..b961cc29a 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -2001,7 +2001,7 @@ function mka() { make -j `sysctl hw.ncpu|cut -d" " -f2` "$@" ;; *) - schedtool -B -n 1 -e ionice -n 1 make -j$(cat /proc/cpuinfo | grep "^processor" | wc -l) "$@" + mk_timer schedtool -B -n 1 -e ionice -n 1 make -j$(cat /proc/cpuinfo | grep "^processor" | wc -l) "$@" ;; esac } @@ -2296,10 +2296,10 @@ function get_make_command() echo command make } -function make() +function mk_timer() { local start_time=$(date +"%s") - $(get_make_command) "$@" + $@ local ret=$? local end_time=$(date +"%s") local tdiff=$(($end_time-$start_time)) @@ -2334,6 +2334,11 @@ function make() return $ret } +function make() +{ + mk_timer $(get_make_command) "$@" +} + if [ "x$SHELL" != "x/bin/bash" ]; then case `ps -o command -p $$` in *bash*) From a8f82b65dca0f4e1509ff5169bceff3652eb4cc9 Mon Sep 17 00:00:00 2001 From: Hashcode Date: Mon, 1 Jun 2015 15:32:09 -0700 Subject: [PATCH 195/309] build: avoid recovery patch if device setup can't do it correctly In the case of Amazon OMAP4's there is a 2nd-bootloader which is inserted at a fixed location in both boot.img and recovery.img. If this 2nd-bootloader is not present, it puts the device in a non-booting state requiring the user to recover the device using fastboot tools. Also, due to the extra binary information in the image, imgdiff hangs during build. For devices like this, allow bypass of the recovery patch generatation. NOTE: In previous versions of CM the BOARD_CUSTOM_BOOTIMG_MK flag accomplished this, but was never forward ported to cm-12.0 and on. Use a new board flag here for clarity as most custom bootimg makefiles can still process a patch for recovery. 
https://github.com/CyanogenMod/android_build/blob/cm-11.0/core/Makefile#L935 Change-Id: I9284ee227358224e3add9862db5c1fbd0f5bd226 --- core/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/Makefile b/core/Makefile index 665adb510..0e9d88c64 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1174,7 +1174,11 @@ SYSTEMIMAGE_SOURCE_DIR := $(TARGET_OUT) # image size check calculation. ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),) intermediates := $(call intermediates-dir-for,PACKAGING,recovery_patch) +ifndef BOARD_CANT_BUILD_RECOVERY_FROM_BOOT_PATCH RECOVERY_FROM_BOOT_PATCH := $(intermediates)/recovery_from_boot.p +else +RECOVERY_FROM_BOOT_PATCH := +endif $(RECOVERY_FROM_BOOT_PATCH): $(INSTALLED_RECOVERYIMAGE_TARGET) \ $(INSTALLED_BOOTIMAGE_TARGET) \ $(HOST_OUT_EXECUTABLES)/imgdiff \ From 531045e07ebeb6aac5dbb8d0c8fb12edba25cc6f Mon Sep 17 00:00:00 2001 From: Anthony King Date: Thu, 30 Apr 2015 22:57:08 +0100 Subject: [PATCH 196/309] lunch: sort the choices This will sort the lunch array when lunch is called, giving us a consistent list of devices, which is easier to search through Change-Id: I80854e8b949950cbc928121a184b64fc3b8164c8 envsetup: lunch: always sort the menu this fixes the use case of 'lunch 34' straight after envsetup Change-Id: I19d45016edf254131015aab8476bc815bec9abe4 --- envsetup.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/envsetup.sh b/envsetup.sh index b961cc29a..e6c83efc2 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -584,6 +584,7 @@ alias bib=breakfast function lunch() { local answer + LUNCH_MENU_CHOICES=($(for l in ${LUNCH_MENU_CHOICES[@]}; do echo "$l"; done | sort)) if [ "$1" ] ; then answer=$1 From 3123fb3f798a7e883f54832f719e396737e68643 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Thu, 30 Apr 2015 23:16:10 +0100 Subject: [PATCH 197/309] envsetup: cleanup the full list echo each one and stick it in columns. 
It was unreadable before Change-Id: I99c50595dcf35f0f03d5d3d6b01043ad8a8306d2 --- envsetup.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index e6c83efc2..bdb06429a 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -43,12 +43,9 @@ Environemnt options: Look at the source to view more functions. The complete list is: EOF T=$(gettop) - local A - A="" for i in `cat $T/build/envsetup.sh | sed -n "/^[ \t]*function /s/function \([a-z_]*\).*/\1/p" | sort | uniq`; do - A="$A $i" - done - echo $A + echo "$i" + done | column } # Get the value of a build variable as an absolute path. From 8fd401fc6a22dae546cea53a070bc137751dc1a1 Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Sat, 6 Jun 2015 22:22:05 -0400 Subject: [PATCH 198/309] Remove LatinIME as it is included per target in vendor/slim Change-Id: Ibce4a244ec0653a7c7798d864047ac667866a1dc --- target/product/core.mk | 1 - 1 file changed, 1 deletion(-) diff --git a/target/product/core.mk b/target/product/core.mk index d453303ff..3af85af0c 100644 --- a/target/product/core.mk +++ b/target/product/core.mk @@ -39,7 +39,6 @@ PRODUCT_PACKAGES += \ InputDevices \ KeyChain \ Keyguard \ - LatinIME \ Launcher2 \ ManagedProvisioning \ PicoTts \ From aefb9c33e1e04d314dcb45f249a47d8a60d8e602 Mon Sep 17 00:00:00 2001 From: Diogo Ferreira Date: Fri, 3 Apr 2015 12:39:37 +0100 Subject: [PATCH 199/309] build: Add the factorypackage target The build system generates several artifacts, among them update zips, OTAs and update packages for fastboot. Shipping devices typically need extra-special packages in order to fulfill factory automation requirements. This patches adds a "factorypackage" target that can be customized by devices by providing a script that converts target files into the desired format. The script path should be set into TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT. 
Change-Id: I993f12766c96274f096c5f6c6da5aaa32394abbc --- core/Makefile | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/core/Makefile b/core/Makefile index 0e9d88c64..5d53d6699 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1818,6 +1818,35 @@ bacon: otapackage $(hide) $(MD5SUM) $(SLIM_TARGET_PACKAGE) > $(SLIM_TARGET_PACKAGE).md5sum @echo -e ${CL_CYN}"Package Complete: $(SLIM_TARGET_PACKAGE)"${CL_RST} +# ----------------------------------------------------------------- +# The factory package + +name := $(TARGET_PRODUCT)-factory-$(FILE_NAME_TAG) + +INTERNAL_FACTORY_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip + +ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),) +# default to common dir for device vendor +$(INTERNAL_FACTORY_PACKAGE_TARGET): extensions := $(TARGET_DEVICE_DIR)/../common +else +$(INTERNAL_FACTORY_PACKAGE_TARGET): extensions := $(TARGET_RELEASETOOLS_EXTENSIONS) +endif + +$(INTERNAL_FACTORY_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) + @echo -e ${CL_YLW}"Package:"${CL_RST}" $@" + if [ -z $(TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT) ]; then \ + echo "Error: Factory script is not defined by target"; \ + exit 1; \ + fi + MKBOOTIMG=$(BOARD_CUSTOM_BOOTIMG_MK) \ + $(TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT) -v \ + -s $(extensions) \ + -p $(HOST_OUT) \ + $(BUILT_TARGET_FILES_PACKAGE) $@ + +.PHONY: factorypackage +factorypackage: $(INTERNAL_FACTORY_PACKAGE_TARGET) + endif # recovery_fstab is defined endif # TARGET_NO_KERNEL != true endif # TARGET_DEVICE != generic* From 99c4f984bbfd218cc3280c3824bcf9599d050cf4 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Tue, 9 Jun 2015 00:11:29 +0530 Subject: [PATCH 200/309] otasigcheck: Extract the key * commit b110c751b181423d268531c624db212d2d81e816 "build: ota: Support for install tools in /tmp/install" erroneously removed the line extracting the releasekey, making the script a no-op as it couldn't find a key to compare against. 
Change-Id: I0dc5d15dbf4b0531de4df9e62a5bd47ec463c2a1 --- tools/releasetools/edify_generator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 740eec425..d7ecb98ec 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -151,6 +151,7 @@ def RunBackup(self, command): self.script.append(('run_program("/tmp/install/bin/backuptool.sh", "%s");' % command)) def ValidateSignatures(self, command): + self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");') # Exit code 124 == abort. run_program returns raw, so left-shift 8bit self.script.append('run_program("/tmp/install/bin/otasigcheck.sh") != "31744" || abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");') From 5913cc31cb250b3ec532883598a67b6f078116f5 Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Sun, 29 Dec 2013 12:57:20 -0600 Subject: [PATCH 201/309] envsetup: repolastsync Prints date and time of last repo sync. Example: Last repo sync: 11 Jun 2015, 21:54:59 CDT / 12 Jun 2015, 02:54:59 UTC Change-Id: I4280b500e58237479194ad2e230ed7466db87755 --- envsetup.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index bdb06429a..7ec74915c 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -30,6 +30,7 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - mka: Builds using SCHED_BATCH on all processors - mkap: Builds the module(s) using mka and pushes them to the device. - cmka: Cleans and builds using mka. +- repolastsync: Prints date and time of last repo sync. - reposync: Parallel repo sync using ionice and SCHED_BATCH - installboot: Installs a boot.img to the connected device. - installrecovery: Installs a recovery.img to the connected device. 
@@ -2024,6 +2025,13 @@ function cmka() { fi } +function repolastsync() { + RLSPATH="$ANDROID_BUILD_TOP/.repo/.repo_fetchtimes.json" + RLSLOCAL=$(date -d "$(stat -c %z $RLSPATH)" +"%e %b %Y, %T %Z") + RLSUTC=$(date -d "$(stat -c %z $RLSPATH)" -u +"%e %b %Y, %T %Z") + echo "Last repo sync: $RLSLOCAL / $RLSUTC" +} + function reposync() { case `uname -s` in Darwin) From 4947b196e2fecf0d5a89b38163075c803fc6051b Mon Sep 17 00:00:00 2001 From: "Howard M. Harte" Date: Fri, 22 May 2015 09:54:20 -0700 Subject: [PATCH 202/309] minnow_max: search for cm.mk in additional directories Intel's product configuration files are structured slightly differentily than others. Device-specific configuration is nested one additional level, as follows: device/intel/// Change-Id: I78b02978dc759b94024e5c5533d1108ac2634549 Conflicts: core/product_config.mk --- core/product_config.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/product_config.mk b/core/product_config.mk index 20805cbb6..1967e5222 100644 --- a/core/product_config.mk +++ b/core/product_config.mk @@ -179,9 +179,9 @@ include $(BUILD_SYSTEM)/node_fns.mk include $(BUILD_SYSTEM)/product.mk include $(BUILD_SYSTEM)/device.mk -# A SLIM build needs only the CM product makefiles. +# A SLIM build needs only the SLIM product makefiles. ifneq ($(SLIM_BUILD),) - all_product_configs := $(shell ls device/*/$(SLIM_BUILD)/slim.mk) + all_product_configs := $(shell find device -path "*/$(SLIM_BUILD)/slim.mk") else ifneq ($(strip $(TARGET_BUILD_APPS)),) # An unbundled app build needs only the core product makefiles. 
From cee517213fc989567c7c65119c53cb38b939f9df Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Wed, 26 Dec 2012 10:53:58 +0530 Subject: [PATCH 203/309] envsetup: push to connected device only if it is $CM_BUILD Change-Id: Icce057cb31d6fbc9abb0292311c88c32fea3540c Conflicts: envsetup.sh --- envsetup.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/envsetup.sh b/envsetup.sh index 7ec74915c..9ce1e23ca 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -749,6 +749,8 @@ function eat() done echo "Device Found.." fi + if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD"); + then # if adbd isn't root we can't write to /cache/recovery/ adb root sleep 1 @@ -768,6 +770,9 @@ EOF return 1 fi return $? + else + echo "The connected device does not appear to be $SLIM_BUILD, run away!" + fi } function gettop @@ -2184,6 +2189,8 @@ function dopush() echo "Device Found." fi + if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD"); + then # retrieve IP and PORT info if we're using a TCP connection TCPIPPORT=$(adb devices | egrep '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:[0-9]+[^0-9]+' \ | head -1 | awk '{print $1}') @@ -2245,6 +2252,9 @@ function dopush() fi rm -f $OUT/.log return 0 + else + echo "The connected device does not appear to be $SLIM_BUILD, run away!" + fi } alias mmp='dopush mm' From 99d5d9617f8ed894f688de05169fc127be24c77f Mon Sep 17 00:00:00 2001 From: Chris Sarbora Date: Wed, 3 Jun 2015 17:25:30 -0700 Subject: [PATCH 204/309] Allow dopush to load binaries onto the "wrong" device * Lets solitary projects be built+pushed quickly even without a full build * Affects mmp, mmmp, mkap as well Change-Id: I53594bcbece9f9458d28c3ea930453c14df95fe8 Conflicts: envsetup.sh --- envsetup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index 9ce1e23ca..391620b2f 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -2189,7 +2189,7 @@ function dopush() echo "Device Found." 
fi - if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD"); + if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD") || [ "$FORCE_PUSH" == "true" ]; then # retrieve IP and PORT info if we're using a TCP connection TCPIPPORT=$(adb devices | egrep '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:[0-9]+[^0-9]+' \ From 75574a4552f8c65f1d6ea98bebae381deee38568 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Fri, 10 Jul 2015 02:31:24 -0700 Subject: [PATCH 205/309] build: Fix device detection for a few of the envsetup commands * Edge case where a certain developer ended up with a blank system partition in recovery. Wonder how that happened? Change-Id: I17854a53bf18d07c98c4b7f53df252d6f1d7113d Conflicts: envsetup.sh --- envsetup.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 391620b2f..2e326d578 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -749,7 +749,7 @@ function eat() done echo "Device Found.." fi - if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD"); + if (adb shell getprop ro.slim.device | grep -q "$SLIM_BUILD"); then # if adbd isn't root we can't write to /cache/recovery/ adb root @@ -2118,7 +2118,7 @@ function installboot() sleep 1 adb wait-for-online shell mount /system 2>&1 > /dev/null adb wait-for-online remount - if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD"); + if (adb shell getprop ro.slim.device | grep -q "$SLIM_BUILD"); then adb push $OUT/boot.img /cache/ for i in $OUT/system/lib/modules/*; @@ -2163,7 +2163,7 @@ function installrecovery() sleep 1 adb wait-for-online shell mount /system 2>&1 > /dev/null adb wait-for-online remount - if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD"); + if (adb shell getprop ro.slim.device | grep -q "$SLIM_BUILD"); then adb push $OUT/recovery.img /cache/ adb shell dd if=/cache/recovery.img of=$PARTITION @@ -2189,7 +2189,7 @@ function dopush() echo "Device 
Found." fi - if (adb shell cat /system/build.prop | grep -q "ro.slim.device=$SLIM_BUILD") || [ "$FORCE_PUSH" == "true" ]; + if (adb shell getprop ro.slim.device | grep -q "$SLIM_BUILD") || [ "$FORCE_PUSH" == "true" ]; then # retrieve IP and PORT info if we're using a TCP connection TCPIPPORT=$(adb devices | egrep '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:[0-9]+[^0-9]+' \ From a9b78d0376c94d555f34d8290740d6f1f6d7f6fc Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Wed, 15 Jul 2015 19:47:45 +0530 Subject: [PATCH 206/309] Allow setting the recovery density separately from the aapt config Change-Id: I817a4c01302956ec846503d9a585c43e690e45bf --- core/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/Makefile b/core/Makefile index 5d53d6699..64fb59b6e 100644 --- a/core/Makefile +++ b/core/Makefile @@ -823,6 +823,9 @@ recovery_resources_common := $(call project-path-for,recovery)/res # Set recovery_density to the density bucket of the device. recovery_density := unknown +ifneq (,$(TARGET_RECOVERY_DENSITY)) +recovery_density := $(filter %dpi,$(TARGET_RECOVERY_DENSITY)) +else ifneq (,$(PRODUCT_AAPT_PREF_CONFIG)) # If PRODUCT_AAPT_PREF_CONFIG includes a dpi bucket, then use that value. recovery_density := $(filter %dpi,$(PRODUCT_AAPT_PREF_CONFIG)) @@ -830,6 +833,7 @@ else # Otherwise, use the default medium density. recovery_densities := mdpi endif +endif ifneq (,$(wildcard $(recovery_resources_common)-$(recovery_density))) recovery_resources_common := $(recovery_resources_common)-$(recovery_density) From 3d3eda645c70ef240c350fd716a70ea7ca8a644a Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Wed, 22 Jul 2015 14:10:29 -0700 Subject: [PATCH 207/309] misc_info: Add factory script location to misc_info when available When a factory script is specified for the device, store the location of the script in the misc_info package for proper use during package signing. 
Change-Id: Icd3ab59ebb68b8b6c92aff8eaf9e44322cdcbc18 Ticket: RM-112 --- core/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/Makefile b/core/Makefile index 64fb59b6e..89f722d6f 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1719,6 +1719,9 @@ ifneq ($(OEM_THUMBPRINT_PROPERTIES),) endif ifdef BUILD_NO $(hide) echo "build_number=$(BUILD_NO)" >> $(zip_root)/META/misc_info.txt +endif +ifdef TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT + $(hide) echo "factory_from_target_script=$(TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT)" >> $(zip_root)/META/misc_info.txt endif $(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt) ifeq ($(TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT),) From 0a09d0ee0f71a28b779d475fdbc5a4138c484417 Mon Sep 17 00:00:00 2001 From: "Brint E. Kriebel" Date: Thu, 23 Jul 2015 10:36:27 -0700 Subject: [PATCH 208/309] build: Allow packaging of factory image files into target files Files may be required inthe target files package for factory flashing processes. Allow these to be added using PRODUCT_FACTORYIMAGE_FILES. 
Change-Id: I74b42daee7b73f762f3c75904c6c278b1e5f5c53 Ticket: RM-112 --- core/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/Makefile b/core/Makefile index 89f722d6f..77389b72c 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1656,6 +1656,9 @@ endif $(hide) $(foreach t,$(INSTALLED_RADIOIMAGE_TARGET),\ mkdir -p $(zip_root)/RADIO; \ $(ACP) $(t) $(zip_root)/RADIO/$(notdir $(t));) + $(hide) $(foreach fi,$(PRODUCT_FACTORYIMAGE_FILES),\ + mkdir -p $(zip_root)/FACTORY; \ + $(ACP) $(fi) $(zip_root)/FACTORY/$(notdir $(fi));) @# Contents of the system image $(hide) $(call package_files-copy-root, \ $(SYSTEMIMAGE_SOURCE_DIR),$(zip_root)/SYSTEM) From 6b04501aeb957c3d99d3842926c905d0f05e9047 Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Tue, 9 Jun 2015 19:35:53 -0500 Subject: [PATCH 209/309] build: Enable custom ccache cache dir for Android During lunch, check environment variables for ANDROID_CCACHE_DIR. If it is specified, CCACHE_DIR is set to match. Prevents issues with multiple versions of ccache writing to the same cache dir. ANDROID_CCACHE_SIZE should also be specified, allowing for a distinct cache directory size from the default. 
Syntax for ANDROID_CCACHE_SIZE is the same as applied in the command: 'ccache -M$ANDROID_CCACHE_SIZE' Change-Id: I17497a6f56347850e0fa7b8ebd4de0b5a0b13e55 --- core/combo/select.mk | 6 ++++++ envsetup.sh | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/core/combo/select.mk b/core/combo/select.mk index 5842705ff..7c80c5fef 100644 --- a/core/combo/select.mk +++ b/core/combo/select.mk @@ -88,7 +88,13 @@ ifneq ($(USE_CCACHE),) ifndef CXX_WRAPPER CXX_WRAPPER := $(ccache) endif + ifeq ($(ANDROID_CCACHE_DIR), $(CCACHE_DIR)) + ifneq ($(ANDROID_CCACHE_SIZE),) + ACCSIZE_RESULT := $(shell $(ccache) -M$(ANDROID_CCACHE_SIZE)) + endif + endif ccache = + ACCSIZE_RESULT = endif endif diff --git a/envsetup.sh b/envsetup.sh index 2e326d578..cd4b8d66c 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -236,6 +236,10 @@ function setpaths() unset ANDROID_HOST_OUT export ANDROID_HOST_OUT=$(get_abs_build_var HOST_OUT) + if [ -n "$ANDROID_CCACHE_DIR" ]; then + export CCACHE_DIR=$ANDROID_CCACHE_DIR + fi + # needed for building linux on MacOS # TODO: fix the path #export HOST_EXTRACFLAGS="-I "$T/system/kernel_headers/host_include From 1bc3c3ea490b27ed1fe4d8aab222790c2d684c63 Mon Sep 17 00:00:00 2001 From: Andreas Blaesius Date: Fri, 15 May 2015 06:11:44 -0400 Subject: [PATCH 210/309] Force bsdiff for recovery_from_boot.p if not using GZIP imgdiff expects a ramdisk to use GZIP, which is causing a chunk size issue if recovery is using LZMA Change-Id: I3194b4faed25a298fe23b4e24b6dea2885bafda8 --- core/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/Makefile b/core/Makefile index 77389b72c..b97f209d6 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1189,8 +1189,12 @@ $(RECOVERY_FROM_BOOT_PATCH): $(INSTALLED_RECOVERYIMAGE_TARGET) \ $(HOST_OUT_EXECUTABLES)/bsdiff @echo -e ${CL_CYN}"Construct recovery from boot"${CL_RST} mkdir -p $(dir $@) +ifeq ($(TARGET_NOT_USE_GZIP_RECOVERY_RAMDISK),true) + PATH=$(HOST_OUT_EXECUTABLES):$$PATH $(HOST_OUT_EXECUTABLES)/bsdiff 
$(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_RECOVERYIMAGE_TARGET) $@ +else PATH=$(HOST_OUT_EXECUTABLES):$$PATH $(HOST_OUT_EXECUTABLES)/imgdiff $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_RECOVERYIMAGE_TARGET) $@ endif +endif $(INSTALLED_SYSTEMIMAGE): $(BUILT_SYSTEMIMAGE) $(RECOVERY_FROM_BOOT_PATCH) | $(ACP) From 99519fd0d38b94c0350e5b6ff4b16daefedef067 Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Tue, 12 Apr 2011 10:57:22 +0100 Subject: [PATCH 211/309] Allow a device to generically define its own headers We have a few cases of devices including specific versions of projects just because of modified headers (msm_mdp.h comes to mind), and I just had enough of ifdeffing header files for specific cases (the P990 needs a lot of these). Now... if a target defines a TARGET_SPECIFIC_HEADER_PATH, any headers in there will take precedence over the standard ones; for example, on the p990, I have TARGET_SPECIFIC_HEADER_PATH := device/lge/p990/include which makes, for example, the device/lge/p990/include/hardware_legacy/AudioHardwareInterface.h be used instead of hardware/libhardware_legacy/include/hardware_legacy/AudioHardwareInterface.h whenever a source file uses Change-Id: I41b62668b60e3f62a6ebd3738d8d2675103a81e6a build: fix target header overlay LOCAL_C_INCLUDES as defined by the makefile should be put AFTER the overlay includes so the overlay always takes precedence. 
Change-Id: I489b2aab6dbacd9122d834f85e07b63ed1271f07 --- core/binary.mk | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/binary.mk b/core/binary.mk index 649565979..4206597b3 100644 --- a/core/binary.mk +++ b/core/binary.mk @@ -956,6 +956,11 @@ normal_objects := \ all_objects := $(normal_objects) $(gen_o_objects) +## Allow a device's own headers to take precedence over global ones +ifneq ($(TARGET_SPECIFIC_HEADER_PATH),) +my_c_includes := $(TOPDIR)$(TARGET_SPECIFIC_HEADER_PATH) $(my_c_includes) +endif + my_c_includes += $(TOPDIR)$(LOCAL_PATH) $(intermediates) $(generated_sources_dir) ifndef LOCAL_SDK_VERSION From 4bd485dad2a52620f8b9bf8ca3bb51026c14fc37 Mon Sep 17 00:00:00 2001 From: Brandon McAnsh Date: Sat, 10 Oct 2015 10:20:38 -0400 Subject: [PATCH 212/309] Set kernel gcc version to 4.8 for ARM targets * This causes the check during path setup for an arm-eabi-4.9 toolchain to fail, thus only leaving the arm-linux-androideabi- to exist. * This is only temporary as Google is building with 4.9 and 4.8 gcc's have been removed in their next release Was updated to 4.9 here: https://github.com/CyanogenMod/android_build/commit/bf8346e90c5a6d5d809fa7166d50714b2b6bc30d Toolchain check here: https://github.com/CyanogenMod/android_build/blob/cm-13.0/envsetup.sh#L189 Change-Id: I24bff10e392a6cdced1797870e523144e83d611d Signed-off-by: Brandon McAnsh --- core/combo/TARGET_linux-arm.mk | 1 + envsetup.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk index 3651c39d9..5020865ea 100644 --- a/core/combo/TARGET_linux-arm.mk +++ b/core/combo/TARGET_linux-arm.mk @@ -39,6 +39,7 @@ $(combo_2nd_arch_prefix)TARGET_NDK_GCC_VERSION := 4.9 ifeq ($(strip $(TARGET_GCC_VERSION_EXP)),) $(combo_2nd_arch_prefix)TARGET_GCC_VERSION := 4.9 +$(combo_2nd_arch_prefix)TARGET_LEGACY_GCC_VERSION := 4.8 else $(combo_2nd_arch_prefix)TARGET_GCC_VERSION := $(TARGET_GCC_VERSION_EXP) endif diff --git a/envsetup.sh 
b/envsetup.sh index cd4b8d66c..afe1bacf7 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -152,6 +152,7 @@ function setpaths() # defined in core/config.mk targetgccversion=$(get_build_var TARGET_GCC_VERSION) targetgccversion2=$(get_build_var 2ND_TARGET_GCC_VERSION) + targetlegacygccversion=$(get_build_var TARGET_LEGACY_GCC_VERSION) export TARGET_GCC_VERSION=$targetgccversion # The gcc toolchain does not exists for windows/cygwin. In this case, do not reference it. @@ -187,7 +188,7 @@ function setpaths() case $ARCH in arm) # Legacy toolchain configuration used for ARM kernel compilation - toolchaindir=arm/arm-eabi-$targetgccversion/bin + toolchaindir=arm/arm-eabi-$targetlegacygccversion/bin if [ -d "$gccprebuiltdir/$toolchaindir" ]; then export ARM_EABI_TOOLCHAIN="$gccprebuiltdir/$toolchaindir" ANDROID_KERNEL_TOOLCHAIN_PATH="$ARM_EABI_TOOLCHAIN": From d20e13cf62d84171052b2536fcd87453a87b4a70 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Mon, 12 Oct 2015 16:35:01 +0300 Subject: [PATCH 213/309] Underp releasetools Change-Id: I8f9752607b64555b99de317610b1e2516ec4037d Signed-off-by: Josue Rivera --- tools/releasetools/ota_from_target_files | 1754 +------------------ tools/releasetools/ota_from_target_files.py | 1753 ++++++++++++++++++ 2 files changed, 1754 insertions(+), 1753 deletions(-) mode change 100755 => 120000 tools/releasetools/ota_from_target_files create mode 100755 tools/releasetools/ota_from_target_files.py diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files deleted file mode 100755 index c8fa8553d..000000000 --- a/tools/releasetools/ota_from_target_files +++ /dev/null @@ -1,1753 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2008 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Given a target-files zipfile, produces an OTA package that installs -that build. An incremental OTA is produced if -i is given, otherwise -a full OTA is produced. - -Usage: ota_from_target_files [flags] input_target_files output_ota_package - - --board_config - Deprecated. - - -k (--package_key) Key to use to sign the package (default is - the value of default_system_dev_certificate from the input - target-files's META/misc_info.txt, or - "build/target/product/security/testkey" if that value is not - specified). - - For incremental OTAs, the default value is based on the source - target-file, not the target build. - - -i (--incremental_from) - Generate an incremental OTA using the given target-files zip as - the starting build. - - --full_radio - When generating an incremental OTA, always include a full copy of - radio image. This option is only meaningful when -i is specified, - because a full radio is always included in a full OTA if applicable. - - --full_bootloader - When generating an incremental OTA, always include a full copy of - bootloader image. This option is only meaningful when -i is specified, - because a full bootloader is always included in a full OTA if applicable. - - -v (--verify) - Remount and verify the checksums of the files written to the - system and vendor (if used) partitions. Incremental builds only. - - -o (--oem_settings) - Use the file to specify the expected OEM-specific properties - on the OEM partition of the intended device. 
- - -w (--wipe_user_data) - Generate an OTA package that will wipe the user data partition - when installed. - - -n (--no_prereq) - Omit the timestamp prereq check normally included at the top of - the build scripts (used for developer OTA packages which - legitimately need to go back and forth). - - -e (--extra_script) - Insert the contents of file at the end of the update script. - - -a (--aslr_mode) - Specify whether to turn on ASLR for the package (on by default). - - -2 (--two_step) - Generate a 'two-step' OTA package, where recovery is updated - first, so that any changes made to the system partition are done - using the new recovery (new kernel, etc.). - - --block - Generate a block-based OTA if possible. Will fall back to a - file-based OTA if the target_files is older and doesn't support - block-based OTAs. - - -b (--binary) - Use the given binary as the update-binary in the output package, - instead of the binary in the build's target_files. Use for - development only. - - -t (--worker_threads) - Specifies the number of worker-threads that will be used when - generating patches for incremental updates (defaults to 3). - - --backup - Enable or disable the execution of backuptool.sh. - Disabled by default. - - --override_device - Override device-specific asserts. Can be a comma-separated list. - - --override_prop - Override build.prop items with custom vendor init. - Enabled when TARGET_UNIFIED_DEVICE is defined in BoardConfig - -""" - -import sys - -if sys.hexversion < 0x02070000: - print >> sys.stderr, "Python 2.7 or newer is required." 
- sys.exit(1) - -import multiprocessing -import os -import tempfile -import zipfile - -import common -import edify_generator -import sparse_img - -OPTIONS = common.OPTIONS -OPTIONS.package_key = None -OPTIONS.incremental_source = None -OPTIONS.verify = False -OPTIONS.require_verbatim = set() -OPTIONS.prohibit_verbatim = set(("system/build.prop",)) -OPTIONS.patch_threshold = 0.95 -OPTIONS.wipe_user_data = False -OPTIONS.omit_prereq = False -OPTIONS.extra_script = None -OPTIONS.aslr_mode = True -OPTIONS.worker_threads = multiprocessing.cpu_count() // 2 -if OPTIONS.worker_threads == 0: - OPTIONS.worker_threads = 1 -OPTIONS.two_step = False -OPTIONS.no_signing = False -OPTIONS.block_based = False -OPTIONS.updater_binary = None -OPTIONS.oem_source = None -OPTIONS.fallback_to_full = True -OPTIONS.full_radio = False -OPTIONS.full_bootloader = False -OPTIONS.backuptool = False -OPTIONS.override_device = 'auto' -OPTIONS.override_prop = False - -def MostPopularKey(d, default): - """Given a dict, return the key corresponding to the largest - value. Returns 'default' if the dict is empty.""" - x = [(v, k) for (k, v) in d.iteritems()] - if not x: - return default - x.sort() - return x[-1][1] - - -def IsSymlink(info): - """Return true if the zipfile.ZipInfo object passed in represents a - symlink.""" - return (info.external_attr >> 16) & 0o770000 == 0o120000 - -def IsRegular(info): - """Return true if the zipfile.ZipInfo object passed in represents a - symlink.""" - return (info.external_attr >> 28) == 0o10 - -def ClosestFileMatch(src, tgtfiles, existing): - """Returns the closest file match between a source file and list - of potential matches. The exact filename match is preferred, - then the sha1 is searched for, and finally a file with the same - basename is evaluated. 
Rename support in the updater-binary is - required for the latter checks to be used.""" - - result = tgtfiles.get("path:" + src.name) - if result is not None: - return result - - if not OPTIONS.target_info_dict.get("update_rename_support", False): - return None - - if src.size < 1000: - return None - - result = tgtfiles.get("sha1:" + src.sha1) - if result is not None and existing.get(result.name) is None: - return result - result = tgtfiles.get("file:" + src.name.split("/")[-1]) - if result is not None and existing.get(result.name) is None: - return result - return None - -class ItemSet(object): - def __init__(self, partition, fs_config): - self.partition = partition - self.fs_config = fs_config - self.ITEMS = {} - - def Get(self, name, is_dir=False): - if name not in self.ITEMS: - self.ITEMS[name] = Item(self, name, is_dir=is_dir) - return self.ITEMS[name] - - def GetMetadata(self, input_zip): - # The target_files contains a record of what the uid, - # gid, and mode are supposed to be. - output = input_zip.read(self.fs_config) - - for line in output.split("\n"): - if not line: - continue - columns = line.split() - name, uid, gid, mode = columns[:4] - selabel = None - capabilities = None - - # After the first 4 columns, there are a series of key=value - # pairs. Extract out the fields we care about. - for element in columns[4:]: - key, value = element.split("=") - if key == "selabel": - selabel = value - if key == "capabilities": - capabilities = value - - i = self.ITEMS.get(name, None) - if i is not None: - i.uid = int(uid) - i.gid = int(gid) - i.mode = int(mode, 8) - i.selabel = selabel - i.capabilities = capabilities - if i.is_dir: - i.children.sort(key=lambda i: i.name) - - # set metadata for the files generated by this script. 
- i = self.ITEMS.get("system/recovery-from-boot.p", None) - if i: - i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None - i = self.ITEMS.get("system/etc/install-recovery.sh", None) - if i: - i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None - - -class Item(object): - """Items represent the metadata (user, group, mode) of files and - directories in the system image.""" - def __init__(self, itemset, name, is_dir=False): - self.itemset = itemset - self.name = name - self.uid = None - self.gid = None - self.mode = None - self.selabel = None - self.capabilities = None - self.is_dir = is_dir - self.descendants = None - self.best_subtree = None - - if name: - self.parent = itemset.Get(os.path.dirname(name), is_dir=True) - self.parent.children.append(self) - else: - self.parent = None - if self.is_dir: - self.children = [] - - def Dump(self, indent=0): - if self.uid is not None: - print "%s%s %d %d %o" % ( - " " * indent, self.name, self.uid, self.gid, self.mode) - else: - print "%s%s %s %s %s" % ( - " " * indent, self.name, self.uid, self.gid, self.mode) - if self.is_dir: - print "%s%s" % (" "*indent, self.descendants) - print "%s%s" % (" "*indent, self.best_subtree) - for i in self.children: - i.Dump(indent=indent+1) - - def CountChildMetadata(self): - """Count up the (uid, gid, mode, selabel, capabilities) tuples for - all children and determine the best strategy for using set_perm_recursive - and set_perm to correctly chown/chmod all the files to their desired - values. Recursively calls itself for all descendants. - - Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count} - counting up all descendants of this node. (dmode or fmode may be None.) - Also sets the best_subtree of each directory Item to the (uid, gid, dmode, - fmode, selabel, capabilities) tuple that will match the most descendants of - that Item. 
- """ - - assert self.is_dir - key = (self.uid, self.gid, self.mode, None, self.selabel, - self.capabilities) - self.descendants = {key: 1} - d = self.descendants - for i in self.children: - if i.is_dir: - for k, v in i.CountChildMetadata().iteritems(): - d[k] = d.get(k, 0) + v - else: - k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities) - d[k] = d.get(k, 0) + 1 - - # Find the (uid, gid, dmode, fmode, selabel, capabilities) - # tuple that matches the most descendants. - - # First, find the (uid, gid) pair that matches the most - # descendants. - ug = {} - for (uid, gid, _, _, _, _), count in d.iteritems(): - ug[(uid, gid)] = ug.get((uid, gid), 0) + count - ug = MostPopularKey(ug, (0, 0)) - - # Now find the dmode, fmode, selabel, and capabilities that match - # the most descendants with that (uid, gid), and choose those. - best_dmode = (0, 0o755) - best_fmode = (0, 0o644) - best_selabel = (0, None) - best_capabilities = (0, None) - for k, count in d.iteritems(): - if k[:2] != ug: - continue - if k[2] is not None and count >= best_dmode[0]: - best_dmode = (count, k[2]) - if k[3] is not None and count >= best_fmode[0]: - best_fmode = (count, k[3]) - if k[4] is not None and count >= best_selabel[0]: - best_selabel = (count, k[4]) - if k[5] is not None and count >= best_capabilities[0]: - best_capabilities = (count, k[5]) - self.best_subtree = ug + ( - best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1]) - - return d - - def SetPermissions(self, script): - """Append set_perm/set_perm_recursive commands to 'script' to - set all permissions, users, and groups for the tree of files - rooted at 'self'.""" - - self.CountChildMetadata() - - def recurse(item, current): - # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple - # that the current item (and all its children) have already been set to. - # We only need to issue set_perm/set_perm_recursive commands if we're - # supposed to be something different. 
- if item.is_dir: - if current != item.best_subtree: - script.SetPermissionsRecursive("/"+item.name, *item.best_subtree) - current = item.best_subtree - - if item.uid != current[0] or item.gid != current[1] or \ - item.mode != current[2] or item.selabel != current[4] or \ - item.capabilities != current[5]: - script.SetPermissions("/"+item.name, item.uid, item.gid, - item.mode, item.selabel, item.capabilities) - - for i in item.children: - recurse(i, current) - else: - if item.uid != current[0] or item.gid != current[1] or \ - item.mode != current[3] or item.selabel != current[4] or \ - item.capabilities != current[5]: - script.SetPermissions("/"+item.name, item.uid, item.gid, - item.mode, item.selabel, item.capabilities) - - recurse(self, (-1, -1, -1, -1, None, None)) - - -def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None): - """Copies files for the partition in the input zip to the output - zip. Populates the Item class with their metadata, and returns a - list of symlinks. output_zip may be None, in which case the copy is - skipped (but the other side effects still happen). substitute is an - optional dict of {output filename: contents} to be output instead of - certain input files. 
- """ - - symlinks = [] - - partition = itemset.partition - - for info in input_zip.infolist(): - prefix = partition.upper() + "/" - if info.filename.startswith(prefix): - basefilename = info.filename[len(prefix):] - if IsSymlink(info): - symlinks.append((input_zip.read(info.filename), - "/" + partition + "/" + basefilename)) - else: - import copy - info2 = copy.copy(info) - fn = info2.filename = partition + "/" + basefilename - if substitute and fn in substitute and substitute[fn] is None: - continue - if output_zip is not None: - if substitute and fn in substitute: - data = substitute[fn] - else: - data = input_zip.read(info.filename) - common.ZipWriteStr(output_zip, info2, data) - if fn.endswith("/"): - itemset.Get(fn[:-1], is_dir=True) - else: - itemset.Get(fn) - - symlinks.sort() - return symlinks - - -def SignOutput(temp_zip_name, output_zip_name): - key_passwords = common.GetKeyPasswords([OPTIONS.package_key]) - pw = key_passwords[OPTIONS.package_key] - - common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw, - whole_file=True) - - -def AppendAssertions(script, info_dict, oem_dict=None): - oem_props = info_dict.get("oem_fingerprint_properties") - if oem_props is None or len(oem_props) == 0: - if OPTIONS.override_device == "auto": - device = GetBuildProp("ro.product.device", info_dict) - else: - device = OPTIONS.override_device - script.AssertDevice(device) - else: - if oem_dict is None: - raise common.ExternalError( - "No OEM file provided to answer expected assertions") - for prop in oem_props.split(): - if oem_dict.get(prop) is None: - raise common.ExternalError( - "The OEM file is missing the property %s" % prop) - script.AssertOemProperty(prop, oem_dict.get(prop)) - - -def HasRecoveryPatch(target_files_zip): - try: - target_files_zip.getinfo("SYSTEM/recovery-from-boot.p") - return True - except KeyError: - return False - -def HasVendorPartition(target_files_zip): - try: - target_files_zip.getinfo("VENDOR/") - return True - except 
KeyError: - return False - -def GetOemProperty(name, oem_props, oem_dict, info_dict): - if oem_props is not None and name in oem_props: - return oem_dict[name] - return GetBuildProp(name, info_dict) - -def CalculateFingerprint(oem_props, oem_dict, info_dict): - if oem_props is None: - return GetBuildProp("ro.build.fingerprint", info_dict) - return "%s/%s/%s:%s" % ( - GetOemProperty("ro.product.brand", oem_props, oem_dict, info_dict), - GetOemProperty("ro.product.name", oem_props, oem_dict, info_dict), - GetOemProperty("ro.product.device", oem_props, oem_dict, info_dict), - GetBuildProp("ro.build.thumbprint", info_dict)) - - -def GetImage(which, tmpdir, info_dict): - # Return an image object (suitable for passing to BlockImageDiff) - # for the 'which' partition (most be "system" or "vendor"). If a - # prebuilt image and file map are found in tmpdir they are used, - # otherwise they are reconstructed from the individual files. - - assert which in ("system", "vendor") - - path = os.path.join(tmpdir, "IMAGES", which + ".img") - mappath = os.path.join(tmpdir, "IMAGES", which + ".map") - if os.path.exists(path) and os.path.exists(mappath): - print "using %s.img from target-files" % (which,) - # This is a 'new' target-files, which already has the image in it. - - else: - print "building %s.img from target-files" % (which,) - - # This is an 'old' target-files, which does not contain images - # already built. Build them. - - mappath = tempfile.mkstemp()[1] - OPTIONS.tempfiles.append(mappath) - - import add_img_to_target_files - if which == "system": - path = add_img_to_target_files.BuildSystem( - tmpdir, info_dict, block_list=mappath) - elif which == "vendor": - path = add_img_to_target_files.BuildVendor( - tmpdir, info_dict, block_list=mappath) - - # Bug: http://b/20939131 - # In ext4 filesystems, block 0 might be changed even being mounted - # R/O. We add it to clobbered_blocks so that it will be written to the - # target unconditionally. 
Note that they are still part of care_map. - clobbered_blocks = "0" - - return sparse_img.SparseImage(path, mappath, clobbered_blocks) - - -def CopyInstallTools(output_zip): - install_path = os.path.join(OPTIONS.input_tmp, "INSTALL") - for root, subdirs, files in os.walk(install_path): - for f in files: - install_source = os.path.join(root, f) - install_target = os.path.join("install", os.path.relpath(root, install_path), f) - output_zip.write(install_source, install_target) - - -def WriteFullOTAPackage(input_zip, output_zip): - # TODO: how to determine this? We don't know what version it will - # be installed on top of. For now, we expect the API just won't - # change very often. Similarly for fstab, it might have changed - # in the target build. - script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict) - - oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") - recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") - oem_dict = None - if oem_props is not None and len(oem_props) > 0: - if OPTIONS.oem_source is None: - raise common.ExternalError("OEM source required for this build") - script.Mount("/oem", recovery_mount_options) - oem_dict = common.LoadDictionaryFromLines( - open(OPTIONS.oem_source).readlines()) - - if OPTIONS.override_prop: - metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.info_dict), - } - else: - metadata = {"post-build": CalculateFingerprint( - oem_props, oem_dict, OPTIONS.info_dict), - "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, - OPTIONS.info_dict), - "post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.info_dict), - } - - device_specific = common.DeviceSpecificParams( - input_zip=input_zip, - input_version=OPTIONS.info_dict["recovery_api_version"], - output_zip=output_zip, - script=script, - input_tmp=OPTIONS.input_tmp, - metadata=metadata, - info_dict=OPTIONS.source_info_dict) - - has_recovery_patch = HasRecoveryPatch(input_zip) - block_based = 
OPTIONS.block_based and has_recovery_patch - - #if not OPTIONS.omit_prereq: - # ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict) - # ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict) - # script.AssertOlderBuild(ts, ts_text) - - AppendAssertions(script, OPTIONS.info_dict, oem_dict) - device_specific.FullOTA_Assertions() - - # Two-step package strategy (in chronological order, which is *not* - # the order in which the generated script has things): - # - # if stage is not "2/3" or "3/3": - # write recovery image to boot partition - # set stage to "2/3" - # reboot to boot partition and restart recovery - # else if stage is "2/3": - # write recovery image to recovery partition - # set stage to "3/3" - # reboot to recovery partition and restart recovery - # else: - # (stage must be "3/3") - # set stage to "" - # do normal full package installation: - # wipe and install system, boot image, etc. - # set up system to update recovery partition on first boot - # complete script normally - # (allow recovery to mark itself finished and reboot) - - recovery_img = common.GetBootableImage("recovery.img", "recovery.img", - OPTIONS.input_tmp, "RECOVERY") - if OPTIONS.two_step: - if not OPTIONS.info_dict.get("multistage_support", None): - assert False, "two-step packages not supported by this build" - fs = OPTIONS.info_dict["fstab"]["/misc"] - assert fs.fs_type.upper() == "EMMC", \ - "two-step packages only supported on devices with EMMC /misc partitions" - bcb_dev = {"bcb_dev": fs.device} - common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data) - script.AppendExtra(""" -if get_stage("%(bcb_dev)s") == "2/3" then -""" % bcb_dev) - script.WriteRawImage("/recovery", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "3/3"); -reboot_now("%(bcb_dev)s", "recovery"); -else if get_stage("%(bcb_dev)s") == "3/3" then -""" % bcb_dev) - - # Dump fingerprints - script.Print("Target: %s" % CalculateFingerprint( - oem_props, oem_dict, OPTIONS.info_dict)) 
- - script.AppendExtra("ifelse(is_mounted(\"/system\"), unmount(\"/system\"));") - device_specific.FullOTA_InstallBegin() - - CopyInstallTools(output_zip) - script.UnpackPackageDir("install", "/tmp/install") - script.SetPermissionsRecursive("/tmp/install", 0, 0, 0755, 0644, None, None) - script.SetPermissionsRecursive("/tmp/install/bin", 0, 0, 0755, 0755, None, None) - - if OPTIONS.backuptool: - script.Mount("/system") - script.RunBackup("backup") - script.Unmount("/system") - - system_progress = 0.75 - - if OPTIONS.wipe_user_data: - system_progress -= 0.1 - if HasVendorPartition(input_zip): - system_progress -= 0.1 - - script.AppendExtra("if is_mounted(\"/data\") then") - script.ValidateSignatures("data") - script.AppendExtra("else") - script.Mount("/data") - script.ValidateSignatures("data") - script.Unmount("/data") - script.AppendExtra("endif;") - - if "selinux_fc" in OPTIONS.info_dict: - WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip) - - recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") - - system_items = ItemSet("system", "META/filesystem_config.txt") - script.ShowProgress(system_progress, 0) - - if block_based: - # Full OTA is done as an "incremental" against an empty source - # image. This has the effect of writing new data from the package - # to the entire partition, but lets us reuse the updater code that - # writes incrementals to do it. 
- system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict) - system_tgt.ResetFileMap() - system_diff = common.BlockDifference("system", system_tgt, src=None) - system_diff.WriteScript(script, output_zip) - else: - script.FormatPartition("/system") - script.Mount("/system", recovery_mount_options) - if not has_recovery_patch: - script.UnpackPackageDir("recovery", "/system") - script.UnpackPackageDir("system", "/system") - - symlinks = CopyPartitionFiles(system_items, input_zip, output_zip) - script.MakeSymlinks(symlinks) - - boot_img = common.GetBootableImage("boot.img", "boot.img", - OPTIONS.input_tmp, "BOOT") - - if not block_based: - def output_sink(fn, data): - common.ZipWriteStr(output_zip, "recovery/" + fn, data) - system_items.Get("system/" + fn) - - common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, - recovery_img, boot_img) - - system_items.GetMetadata(input_zip) - system_items.Get("system").SetPermissions(script) - - if HasVendorPartition(input_zip): - vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") - script.ShowProgress(0.1, 0) - - if block_based: - vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict) - vendor_tgt.ResetFileMap() - vendor_diff = common.BlockDifference("vendor", vendor_tgt) - vendor_diff.WriteScript(script, output_zip) - else: - script.FormatPartition("/vendor") - script.Mount("/vendor", recovery_mount_options) - script.UnpackPackageDir("vendor", "/vendor") - - symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip) - script.MakeSymlinks(symlinks) - - vendor_items.GetMetadata(input_zip) - vendor_items.Get("vendor").SetPermissions(script) - - common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict) - common.ZipWriteStr(output_zip, "boot.img", boot_img.data) - - device_specific.FullOTA_PostValidate() - - if OPTIONS.backuptool: - script.ShowProgress(0.02, 10) - if block_based: - script.Mount("/system") - script.RunBackup("restore") - if block_based: - 
script.Unmount("/system") - - script.ShowProgress(0.05, 5) - script.WriteRawImage("/boot", "boot.img") - - script.ShowProgress(0.2, 10) - device_specific.FullOTA_InstallEnd() - - if OPTIONS.extra_script is not None: - script.AppendExtra(OPTIONS.extra_script) - - script.UnmountAll() - - if OPTIONS.wipe_user_data: - script.ShowProgress(0.1, 10) - script.FormatPartition("/data") - - if OPTIONS.two_step: - script.AppendExtra(""" -set_stage("%(bcb_dev)s", ""); -""" % bcb_dev) - script.AppendExtra("else\n") - script.WriteRawImage("/boot", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "2/3"); -reboot_now("%(bcb_dev)s", ""); -endif; -endif; -""" % bcb_dev) - script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary) - WriteMetadata(metadata, output_zip) - - common.ZipWriteStr(output_zip, "system/build.prop", - ""+input_zip.read("SYSTEM/build.prop")) - - common.ZipWriteStr(output_zip, "META-INF/org/cyanogenmod/releasekey", - ""+input_zip.read("META/releasekey.txt")) - -def WritePolicyConfig(file_name, output_zip): - common.ZipWrite(output_zip, file_name, os.path.basename(file_name)) - -def WriteMetadata(metadata, output_zip): - common.ZipWriteStr(output_zip, "META-INF/com/android/metadata", - "".join(["%s=%s\n" % kv - for kv in sorted(metadata.iteritems())])) - - -def LoadPartitionFiles(z, partition): - """Load all the files from the given partition in a given target-files - ZipFile, and return a dict of {filename: File object}.""" - out = {} - prefix = partition.upper() + "/" - for info in z.infolist(): - if info.filename.startswith(prefix) and not IsSymlink(info): - basefilename = info.filename[len(prefix):] - fn = partition + "/" + basefilename - data = z.read(info.filename) - out[fn] = common.File(fn, data) - return out - - -def GetBuildProp(prop, info_dict): - """Return the fingerprint of the build of a given target-files info_dict.""" - try: - return info_dict.get("build.prop", {})[prop] - except KeyError: - raise 
common.ExternalError("couldn't find %s in build.prop" % (prop,)) - - -def AddToKnownPaths(filename, known_paths): - if filename[-1] == "/": - return - dirs = filename.split("/")[:-1] - while len(dirs) > 0: - path = "/".join(dirs) - if path in known_paths: - break - known_paths.add(path) - dirs.pop() - - -def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip): - source_version = OPTIONS.source_info_dict["recovery_api_version"] - target_version = OPTIONS.target_info_dict["recovery_api_version"] - - if source_version == 0: - print ("WARNING: generating edify script for a source that " - "can't install it.") - script = edify_generator.EdifyGenerator( - source_version, OPTIONS.target_info_dict, - fstab=OPTIONS.source_info_dict["fstab"]) - - if OPTIONS.override_prop: - metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.target_info_dict), - } - else: - metadata = {"pre-device": GetBuildProp("ro.product.device", - OPTIONS.source_info_dict), - "post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.target_info_dict), - } - - device_specific = common.DeviceSpecificParams( - source_zip=source_zip, - source_version=source_version, - target_zip=target_zip, - target_version=target_version, - output_zip=output_zip, - script=script, - metadata=metadata, - info_dict=OPTIONS.source_info_dict) - - # TODO: Currently this works differently from WriteIncrementalOTAPackage(). - # This function doesn't consider thumbprints when writing - # metadata["pre/post-build"]. One possible reason is that the current - # devices with thumbprints are all using file-based OTAs. Long term we - # should factor out the common parts into a shared one to avoid further - # divergence. 
- source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict) - target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict) - metadata["pre-build"] = source_fp - metadata["post-build"] = target_fp - - source_boot = common.GetBootableImage( - "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", - OPTIONS.source_info_dict) - target_boot = common.GetBootableImage( - "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") - updating_boot = (not OPTIONS.two_step and - (source_boot.data != target_boot.data)) - - target_recovery = common.GetBootableImage( - "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") - - system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict) - system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict) - - blockimgdiff_version = 1 - if OPTIONS.info_dict: - blockimgdiff_version = max( - int(i) for i in - OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) - - system_diff = common.BlockDifference("system", system_tgt, system_src, - version=blockimgdiff_version) - - if HasVendorPartition(target_zip): - if not HasVendorPartition(source_zip): - raise RuntimeError("can't generate incremental that adds /vendor") - vendor_src = GetImage("vendor", OPTIONS.source_tmp, - OPTIONS.source_info_dict) - vendor_tgt = GetImage("vendor", OPTIONS.target_tmp, - OPTIONS.target_info_dict) - vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src, - version=blockimgdiff_version) - else: - vendor_diff = None - - oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties") - recovery_mount_options = OPTIONS.source_info_dict.get( - "recovery_mount_options") - oem_dict = None - if oem_props is not None and len(oem_props) > 0: - if OPTIONS.oem_source is None: - raise common.ExternalError("OEM source required for this build") - script.Mount("/oem", recovery_mount_options) - oem_dict = common.LoadDictionaryFromLines( - 
open(OPTIONS.oem_source).readlines()) - - AppendAssertions(script, OPTIONS.target_info_dict, oem_dict) - device_specific.IncrementalOTA_Assertions() - - # Two-step incremental package strategy (in chronological order, - # which is *not* the order in which the generated script has - # things): - # - # if stage is not "2/3" or "3/3": - # do verification on current system - # write recovery image to boot partition - # set stage to "2/3" - # reboot to boot partition and restart recovery - # else if stage is "2/3": - # write recovery image to recovery partition - # set stage to "3/3" - # reboot to recovery partition and restart recovery - # else: - # (stage must be "3/3") - # perform update: - # patch system files, etc. - # force full install of new boot image - # set up system to update recovery partition on first boot - # complete script normally - # (allow recovery to mark itself finished and reboot) - - if OPTIONS.two_step: - if not OPTIONS.source_info_dict.get("multistage_support", None): - assert False, "two-step packages not supported by this build" - fs = OPTIONS.source_info_dict["fstab"]["/misc"] - assert fs.fs_type.upper() == "EMMC", \ - "two-step packages only supported on devices with EMMC /misc partitions" - bcb_dev = {"bcb_dev": fs.device} - common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) - script.AppendExtra(""" -if get_stage("%(bcb_dev)s") == "2/3" then -""" % bcb_dev) - script.AppendExtra("sleep(20);\n") - script.WriteRawImage("/recovery", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "3/3"); -reboot_now("%(bcb_dev)s", "recovery"); -else if get_stage("%(bcb_dev)s") != "3/3" then -""" % bcb_dev) - - # Dump fingerprints - script.Print("Source: %s" % CalculateFingerprint( - oem_props, oem_dict, OPTIONS.source_info_dict)) - script.Print("Target: %s" % CalculateFingerprint( - oem_props, oem_dict, OPTIONS.target_info_dict)) - - script.Print("Verifying current system...") - - 
device_specific.IncrementalOTA_VerifyBegin() - - if oem_props is None: - # When blockimgdiff version is less than 3 (non-resumable block-based OTA), - # patching on a device that's already on the target build will damage the - # system. Because operations like move don't check the block state, they - # always apply the changes unconditionally. - if blockimgdiff_version <= 2: - script.AssertSomeFingerprint(source_fp) - else: - script.AssertSomeFingerprint(source_fp, target_fp) - else: - if blockimgdiff_version <= 2: - script.AssertSomeThumbprint( - GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) - else: - script.AssertSomeThumbprint( - GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), - GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) - - if updating_boot: - boot_type, boot_device = common.GetTypeAndDevice( - "/boot", OPTIONS.source_info_dict) - d = common.Difference(target_boot, source_boot) - _, _, d = d.ComputePatch() - if d is None: - include_full_boot = True - common.ZipWriteStr(output_zip, "boot.img", target_boot.data) - else: - include_full_boot = False - - print "boot target: %d source: %d diff: %d" % ( - target_boot.size, source_boot.size, len(d)) - - common.ZipWriteStr(output_zip, "patch/boot.img.p", d) - - script.PatchCheck("%s:%s:%d:%s:%d:%s" % - (boot_type, boot_device, - source_boot.size, source_boot.sha1, - target_boot.size, target_boot.sha1)) - - device_specific.IncrementalOTA_VerifyEnd() - - if OPTIONS.two_step: - script.WriteRawImage("/boot", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "2/3"); -reboot_now("%(bcb_dev)s", ""); -else -""" % bcb_dev) - - # Verify the existing partitions. 
- system_diff.WriteVerifyScript(script) - if vendor_diff: - vendor_diff.WriteVerifyScript(script) - - script.Comment("---- start making changes here ----") - - device_specific.IncrementalOTA_InstallBegin() - - system_diff.WriteScript(script, output_zip, - progress=0.8 if vendor_diff else 0.9) - if vendor_diff: - vendor_diff.WriteScript(script, output_zip, progress=0.1) - - if OPTIONS.two_step: - common.ZipWriteStr(output_zip, "boot.img", target_boot.data) - script.WriteRawImage("/boot", "boot.img") - print "writing full boot image (forced by two-step mode)" - - if not OPTIONS.two_step: - if updating_boot: - if include_full_boot: - print "boot image changed; including full." - script.Print("Installing boot image...") - script.WriteRawImage("/boot", "boot.img") - else: - # Produce the boot image by applying a patch to the current - # contents of the boot partition, and write it back to the - # partition. - print "boot image changed; including patch." - script.Print("Patching boot image...") - script.ShowProgress(0.1, 10) - script.ApplyPatch("%s:%s:%d:%s:%d:%s" - % (boot_type, boot_device, - source_boot.size, source_boot.sha1, - target_boot.size, target_boot.sha1), - "-", - target_boot.size, target_boot.sha1, - source_boot.sha1, "patch/boot.img.p") - else: - print "boot image unchanged; skipping." - - # Do device-specific installation (eg, write radio image). 
- device_specific.IncrementalOTA_InstallEnd() - - if OPTIONS.extra_script is not None: - script.AppendExtra(OPTIONS.extra_script) - - if OPTIONS.wipe_user_data: - script.Print("Erasing user data...") - script.FormatPartition("/data") - - if OPTIONS.two_step: - script.AppendExtra(""" -set_stage("%(bcb_dev)s", ""); -endif; -endif; -""" % bcb_dev) - - script.SetProgress(1) - script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) - WriteMetadata(metadata, output_zip) - - -class FileDifference(object): - def __init__(self, partition, source_zip, target_zip, output_zip): - self.deferred_patch_list = None - print "Loading target..." - self.target_data = target_data = LoadPartitionFiles(target_zip, partition) - print "Loading source..." - self.source_data = source_data = LoadPartitionFiles(source_zip, partition) - - self.verbatim_targets = verbatim_targets = [] - self.patch_list = patch_list = [] - diffs = [] - self.renames = renames = {} - known_paths = set() - largest_source_size = 0 - - matching_file_cache = {} - for fn, sf in source_data.items(): - assert fn == sf.name - matching_file_cache["path:" + fn] = sf - if fn in target_data.keys(): - AddToKnownPaths(fn, known_paths) - # Only allow eligibility for filename/sha matching - # if there isn't a perfect path match. 
- if target_data.get(sf.name) is None: - matching_file_cache["file:" + fn.split("/")[-1]] = sf - matching_file_cache["sha:" + sf.sha1] = sf - - for fn in sorted(target_data.keys()): - tf = target_data[fn] - assert fn == tf.name - sf = ClosestFileMatch(tf, matching_file_cache, renames) - if sf is not None and sf.name != tf.name: - print "File has moved from " + sf.name + " to " + tf.name - renames[sf.name] = tf - - if sf is None or fn in OPTIONS.require_verbatim: - # This file should be included verbatim - if fn in OPTIONS.prohibit_verbatim: - raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,)) - print "send", fn, "verbatim" - tf.AddToZip(output_zip) - verbatim_targets.append((fn, tf.size, tf.sha1)) - if fn in target_data.keys(): - AddToKnownPaths(fn, known_paths) - elif tf.sha1 != sf.sha1: - # File is different; consider sending as a patch - diffs.append(common.Difference(tf, sf)) - else: - # Target file data identical to source (may still be renamed) - pass - - common.ComputeDifferences(diffs) - - for diff in diffs: - tf, sf, d = diff.GetPatch() - path = "/".join(tf.name.split("/")[:-1]) - if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \ - path not in known_paths: - # patch is almost as big as the file; don't bother patching - # or a patch + rename cannot take place due to the target - # directory not existing - tf.AddToZip(output_zip) - verbatim_targets.append((tf.name, tf.size, tf.sha1)) - if sf.name in renames: - del renames[sf.name] - AddToKnownPaths(tf.name, known_paths) - else: - common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d) - patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest())) - largest_source_size = max(largest_source_size, sf.size) - - self.largest_source_size = largest_source_size - - def EmitVerification(self, script): - so_far = 0 - for tf, sf, _, _ in self.patch_list: - if tf.name != sf.name: - script.SkipNextActionIfTargetExists(tf.name, tf.sha1) - script.PatchCheck("/"+sf.name, tf.sha1, 
sf.sha1) - so_far += sf.size - return so_far - - def EmitExplicitTargetVerification(self, script): - for fn, _, sha1 in self.verbatim_targets: - if fn[-1] != "/": - script.FileCheck("/"+fn, sha1) - for tf, _, _, _ in self.patch_list: - script.FileCheck(tf.name, tf.sha1) - - def RemoveUnneededFiles(self, script, extras=()): - script.DeleteFiles( - ["/" + i[0] for i in self.verbatim_targets] + - ["/" + i for i in sorted(self.source_data) - if i not in self.target_data and i not in self.renames] + - list(extras)) - - def TotalPatchSize(self): - return sum(i[1].size for i in self.patch_list) - - def EmitPatches(self, script, total_patch_size, so_far): - self.deferred_patch_list = deferred_patch_list = [] - for item in self.patch_list: - tf, sf, _, _ = item - if tf.name == "system/build.prop": - deferred_patch_list.append(item) - continue - if sf.name != tf.name: - script.SkipNextActionIfTargetExists(tf.name, tf.sha1) - script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1, - "patch/" + sf.name + ".p") - so_far += tf.size - script.SetProgress(so_far / total_patch_size) - return so_far - - def EmitDeferredPatches(self, script): - for item in self.deferred_patch_list: - tf, sf, _, _ = item - script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, - "patch/" + sf.name + ".p") - script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None) - - def EmitRenames(self, script): - if len(self.renames) > 0: - script.Print("Renaming files...") - for src, tgt in self.renames.iteritems(): - print "Renaming " + src + " to " + tgt.name - script.RenameFile(src, tgt.name) - - -def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip): - target_has_recovery_patch = HasRecoveryPatch(target_zip) - source_has_recovery_patch = HasRecoveryPatch(source_zip) - - if (OPTIONS.block_based and - target_has_recovery_patch and - source_has_recovery_patch): - return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip) - - source_version = 
OPTIONS.source_info_dict["recovery_api_version"] - target_version = OPTIONS.target_info_dict["recovery_api_version"] - - if source_version == 0: - print ("WARNING: generating edify script for a source that " - "can't install it.") - script = edify_generator.EdifyGenerator( - source_version, OPTIONS.target_info_dict, - fstab=OPTIONS.source_info_dict["fstab"]) - - oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") - recovery_mount_options = OPTIONS.source_info_dict.get( - "recovery_mount_options") - oem_dict = None - if oem_props is not None and len(oem_props) > 0: - if OPTIONS.oem_source is None: - raise common.ExternalError("OEM source required for this build") - script.Mount("/oem", recovery_mount_options) - oem_dict = common.LoadDictionaryFromLines( - open(OPTIONS.oem_source).readlines()) - - if OPTIONS.override_prop: - metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.target_info_dict), - } - else: - metadata = {"pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, - OPTIONS.source_info_dict), - "post-timestamp": GetBuildProp("ro.build.date.utc", - OPTIONS.target_info_dict), - } - - device_specific = common.DeviceSpecificParams( - source_zip=source_zip, - source_version=source_version, - target_zip=target_zip, - target_version=target_version, - output_zip=output_zip, - script=script, - metadata=metadata, - info_dict=OPTIONS.info_dict) - - system_diff = FileDifference("system", source_zip, target_zip, output_zip) - script.Mount("/system", recovery_mount_options) - if HasVendorPartition(target_zip): - vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip) - script.Mount("/vendor", recovery_mount_options) - else: - vendor_diff = None - - if not OPTIONS.override_prop: - target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict) - source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict) - - if oem_props is None: - 
script.AssertSomeFingerprint(source_fp, target_fp) - else: - script.AssertSomeThumbprint( - GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), - GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) - - metadata["pre-build"] = source_fp - metadata["post-build"] = target_fp - - source_boot = common.GetBootableImage( - "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", - OPTIONS.source_info_dict) - target_boot = common.GetBootableImage( - "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") - updating_boot = (not OPTIONS.two_step and - (source_boot.data != target_boot.data)) - - source_recovery = common.GetBootableImage( - "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY", - OPTIONS.source_info_dict) - target_recovery = common.GetBootableImage( - "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") - updating_recovery = (source_recovery.data != target_recovery.data) - - # Here's how we divide up the progress bar: - # 0.1 for verifying the start state (PatchCheck calls) - # 0.8 for applying patches (ApplyPatch calls) - # 0.1 for unpacking verbatim files, symlinking, and doing the - # device-specific commands. - - AppendAssertions(script, OPTIONS.target_info_dict, oem_dict) - device_specific.IncrementalOTA_Assertions() - - # Two-step incremental package strategy (in chronological order, - # which is *not* the order in which the generated script has - # things): - # - # if stage is not "2/3" or "3/3": - # do verification on current system - # write recovery image to boot partition - # set stage to "2/3" - # reboot to boot partition and restart recovery - # else if stage is "2/3": - # write recovery image to recovery partition - # set stage to "3/3" - # reboot to recovery partition and restart recovery - # else: - # (stage must be "3/3") - # perform update: - # patch system files, etc. 
- # force full install of new boot image - # set up system to update recovery partition on first boot - # complete script normally - # (allow recovery to mark itself finished and reboot) - - if OPTIONS.two_step: - if not OPTIONS.source_info_dict.get("multistage_support", None): - assert False, "two-step packages not supported by this build" - fs = OPTIONS.source_info_dict["fstab"]["/misc"] - assert fs.fs_type.upper() == "EMMC", \ - "two-step packages only supported on devices with EMMC /misc partitions" - bcb_dev = {"bcb_dev": fs.device} - common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) - script.AppendExtra(""" -if get_stage("%(bcb_dev)s") == "2/3" then -""" % bcb_dev) - script.AppendExtra("sleep(20);\n") - script.WriteRawImage("/recovery", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "3/3"); -reboot_now("%(bcb_dev)s", "recovery"); -else if get_stage("%(bcb_dev)s") != "3/3" then -""" % bcb_dev) - - # Dump fingerprints - script.Print("Source: %s" % (source_fp,)) - script.Print("Target: %s" % (target_fp,)) - - script.Print("Verifying current system...") - - device_specific.IncrementalOTA_VerifyBegin() - - script.ShowProgress(0.1, 0) - so_far = system_diff.EmitVerification(script) - if vendor_diff: - so_far += vendor_diff.EmitVerification(script) - - if updating_boot: - d = common.Difference(target_boot, source_boot) - _, _, d = d.ComputePatch() - print "boot target: %d source: %d diff: %d" % ( - target_boot.size, source_boot.size, len(d)) - - common.ZipWriteStr(output_zip, "patch/boot.img.p", d) - - boot_type, boot_device = common.GetTypeAndDevice( - "/boot", OPTIONS.source_info_dict) - - script.PatchCheck("%s:%s:%d:%s:%d:%s" % - (boot_type, boot_device, - source_boot.size, source_boot.sha1, - target_boot.size, target_boot.sha1)) - so_far += source_boot.size - - size = [] - if system_diff.patch_list: - size.append(system_diff.largest_source_size) - if vendor_diff: - if vendor_diff.patch_list: - 
size.append(vendor_diff.largest_source_size) - if size or updating_recovery or updating_boot: - script.CacheFreeSpaceCheck(max(size)) - - device_specific.IncrementalOTA_VerifyEnd() - - if OPTIONS.two_step: - script.WriteRawImage("/boot", "recovery.img") - script.AppendExtra(""" -set_stage("%(bcb_dev)s", "2/3"); -reboot_now("%(bcb_dev)s", ""); -else -""" % bcb_dev) - - script.Comment("---- start making changes here ----") - - device_specific.IncrementalOTA_InstallBegin() - - if OPTIONS.two_step: - common.ZipWriteStr(output_zip, "boot.img", target_boot.data) - script.WriteRawImage("/boot", "boot.img") - print "writing full boot image (forced by two-step mode)" - - script.Print("Removing unneeded files...") - system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",)) - if vendor_diff: - vendor_diff.RemoveUnneededFiles(script) - - script.ShowProgress(0.8, 0) - total_patch_size = 1.0 + system_diff.TotalPatchSize() - if vendor_diff: - total_patch_size += vendor_diff.TotalPatchSize() - if updating_boot: - total_patch_size += target_boot.size - - script.Print("Patching system files...") - so_far = system_diff.EmitPatches(script, total_patch_size, 0) - if vendor_diff: - script.Print("Patching vendor files...") - so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far) - - if not OPTIONS.two_step: - if updating_boot: - # Produce the boot image by applying a patch to the current - # contents of the boot partition, and write it back to the - # partition. - script.Print("Patching boot image...") - script.ApplyPatch("%s:%s:%d:%s:%d:%s" - % (boot_type, boot_device, - source_boot.size, source_boot.sha1, - target_boot.size, target_boot.sha1), - "-", - target_boot.size, target_boot.sha1, - source_boot.sha1, "patch/boot.img.p") - so_far += target_boot.size - script.SetProgress(so_far / total_patch_size) - print "boot image changed; including." - else: - print "boot image unchanged; skipping." 
- - system_items = ItemSet("system", "META/filesystem_config.txt") - if vendor_diff: - vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") - - if updating_recovery: - # Recovery is generated as a patch using both the boot image - # (which contains the same linux kernel as recovery) and the file - # /system/etc/recovery-resource.dat (which contains all the images - # used in the recovery UI) as sources. This lets us minimize the - # size of the patch, which must be included in every OTA package. - # - # For older builds where recovery-resource.dat is not present, we - # use only the boot image as the source. - - if not target_has_recovery_patch: - def output_sink(fn, data): - common.ZipWriteStr(output_zip, "recovery/" + fn, data) - system_items.Get("system/" + fn) - - common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink, - target_recovery, target_boot) - script.DeleteFiles(["/system/recovery-from-boot.p", - "/system/etc/install-recovery.sh"]) - print "recovery image changed; including as patch from boot." - else: - print "recovery image unchanged; skipping." - - script.ShowProgress(0.1, 10) - - target_symlinks = CopyPartitionFiles(system_items, target_zip, None) - if vendor_diff: - target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None)) - - temp_script = script.MakeTemporary() - system_items.GetMetadata(target_zip) - system_items.Get("system").SetPermissions(temp_script) - if vendor_diff: - vendor_items.GetMetadata(target_zip) - vendor_items.Get("vendor").SetPermissions(temp_script) - - # Note that this call will mess up the trees of Items, so make sure - # we're done with them. 
- source_symlinks = CopyPartitionFiles(system_items, source_zip, None) - if vendor_diff: - source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None)) - - target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks]) - source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks]) - - # Delete all the symlinks in source that aren't in target. This - # needs to happen before verbatim files are unpacked, in case a - # symlink in the source is replaced by a real file in the target. - - # If a symlink in the source will be replaced by a regular file, we cannot - # delete the symlink/file in case the package gets applied again. For such - # a symlink, we prepend a sha1_check() to detect if it has been updated. - # (Bug: 23646151) - replaced_symlinks = dict() - if system_diff: - for i in system_diff.verbatim_targets: - replaced_symlinks["/%s" % (i[0],)] = i[2] - if vendor_diff: - for i in vendor_diff.verbatim_targets: - replaced_symlinks["/%s" % (i[0],)] = i[2] - - if system_diff: - for tf in system_diff.renames.values(): - replaced_symlinks["/%s" % (tf.name,)] = tf.sha1 - if vendor_diff: - for tf in vendor_diff.renames.values(): - replaced_symlinks["/%s" % (tf.name,)] = tf.sha1 - - always_delete = [] - may_delete = [] - for dest, link in source_symlinks: - if link not in target_symlinks_d: - if link in replaced_symlinks: - may_delete.append((link, replaced_symlinks[link])) - else: - always_delete.append(link) - script.DeleteFiles(always_delete) - script.DeleteFilesIfNotMatching(may_delete) - - if system_diff.verbatim_targets: - script.Print("Unpacking new system files...") - script.UnpackPackageDir("system", "/system") - if vendor_diff and vendor_diff.verbatim_targets: - script.Print("Unpacking new vendor files...") - script.UnpackPackageDir("vendor", "/vendor") - - if updating_recovery and not target_has_recovery_patch: - script.Print("Unpacking new recovery...") - script.UnpackPackageDir("recovery", "/system") - - system_diff.EmitRenames(script) 
- if vendor_diff: - vendor_diff.EmitRenames(script) - - script.Print("Symlinks and permissions...") - - # Create all the symlinks that don't already exist, or point to - # somewhere different than what we want. Delete each symlink before - # creating it, since the 'symlink' command won't overwrite. - to_create = [] - for dest, link in target_symlinks: - if link in source_symlinks_d: - if dest != source_symlinks_d[link]: - to_create.append((dest, link)) - else: - to_create.append((dest, link)) - script.DeleteFiles([i[1] for i in to_create]) - script.MakeSymlinks(to_create) - - # Now that the symlinks are created, we can set all the - # permissions. - script.AppendScript(temp_script) - - # Do device-specific installation (eg, write radio image). - device_specific.IncrementalOTA_InstallEnd() - - if OPTIONS.extra_script is not None: - script.AppendExtra(OPTIONS.extra_script) - - # Patch the build.prop file last, so if something fails but the - # device can still come up, it appears to be the old build and will - # get set the OTA package again to retry. 
- script.Print("Patching remaining system files...") - system_diff.EmitDeferredPatches(script) - - if OPTIONS.wipe_user_data: - script.Print("Erasing user data...") - script.FormatPartition("/data") - - if OPTIONS.two_step: - script.AppendExtra(""" -set_stage("%(bcb_dev)s", ""); -endif; -endif; -""" % bcb_dev) - - if OPTIONS.verify and system_diff: - script.Print("Remounting and verifying system partition files...") - script.Unmount("/system") - script.Mount("/system") - system_diff.EmitExplicitTargetVerification(script) - - if OPTIONS.verify and vendor_diff: - script.Print("Remounting and verifying vendor partition files...") - script.Unmount("/vendor") - script.Mount("/vendor") - vendor_diff.EmitExplicitTargetVerification(script) - script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) - - WriteMetadata(metadata, output_zip) - -def main(argv): - - def option_handler(o, a): - if o == "--board_config": - pass # deprecated - elif o in ("-k", "--package_key"): - OPTIONS.package_key = a - elif o in ("-i", "--incremental_from"): - OPTIONS.incremental_source = a - elif o == "--full_radio": - OPTIONS.full_radio = True - elif o == "--full_bootloader": - OPTIONS.full_bootloader = True - elif o in ("-w", "--wipe_user_data"): - OPTIONS.wipe_user_data = True - elif o in ("-n", "--no_prereq"): - OPTIONS.omit_prereq = True - elif o in ("-o", "--oem_settings"): - OPTIONS.oem_source = a - elif o in ("-e", "--extra_script"): - OPTIONS.extra_script = a - elif o in ("-a", "--aslr_mode"): - if a in ("on", "On", "true", "True", "yes", "Yes"): - OPTIONS.aslr_mode = True - else: - OPTIONS.aslr_mode = False - elif o in ("-t", "--worker_threads"): - if a.isdigit(): - OPTIONS.worker_threads = int(a) - else: - raise ValueError("Cannot parse value %r for option %r - only " - "integers are allowed." 
% (a, o)) - elif o in ("-2", "--two_step"): - OPTIONS.two_step = True - elif o == "--no_signing": - OPTIONS.no_signing = True - elif o == "--verify": - OPTIONS.verify = True - elif o == "--block": - OPTIONS.block_based = True - elif o in ("-b", "--binary"): - OPTIONS.updater_binary = a - elif o in ("--no_fallback_to_full",): - OPTIONS.fallback_to_full = False - elif o in ("--backup"): - OPTIONS.backuptool = bool(a.lower() == 'true') - elif o in ("--override_device"): - OPTIONS.override_device = a - elif o in ("--override_prop"): - OPTIONS.override_prop = bool(a.lower() == 'true') - else: - return False - return True - - args = common.ParseOptions(argv, __doc__, - extra_opts="b:k:i:d:wne:t:a:2o:", - extra_long_opts=[ - "board_config=", - "package_key=", - "incremental_from=", - "full_radio", - "full_bootloader", - "wipe_user_data", - "no_prereq", - "extra_script=", - "worker_threads=", - "aslr_mode=", - "two_step", - "no_signing", - "block", - "binary=", - "oem_settings=", - "verify", - "no_fallback_to_full", - "backup=", - "override_device=", - "override_prop=" - ], extra_option_handler=option_handler) - - if len(args) != 2: - common.Usage(__doc__) - sys.exit(1) - - if OPTIONS.extra_script is not None: - OPTIONS.extra_script = open(OPTIONS.extra_script).read() - - print "unzipping target target-files..." - OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) - - OPTIONS.target_tmp = OPTIONS.input_tmp - OPTIONS.info_dict = common.LoadInfoDict(input_zip) - - # If this image was originally labelled with SELinux contexts, make sure we - # also apply the labels in our new image. During building, the "file_contexts" - # is in the out/ directory tree, but for repacking from target-files.zip it's - # in the root directory of the ramdisk. 
- if "selinux_fc" in OPTIONS.info_dict: - OPTIONS.info_dict["selinux_fc"] = os.path.join( - OPTIONS.input_tmp, "BOOT", "RAMDISK", "file_contexts") - - if OPTIONS.verbose: - print "--- target info ---" - common.DumpInfoDict(OPTIONS.info_dict) - - # If the caller explicitly specified the device-specific extensions - # path via -s/--device_specific, use that. Otherwise, use - # META/releasetools.py if it is present in the target target_files. - # Otherwise, take the path of the file from 'tool_extensions' in the - # info dict and look for that in the local filesystem, relative to - # the current directory. - - if OPTIONS.device_specific is None: - from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py") - if os.path.exists(from_input): - print "(using device-specific extensions from target_files)" - OPTIONS.device_specific = from_input - else: - OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None) - - if OPTIONS.device_specific is not None: - OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific) - - while True: - - if OPTIONS.no_signing: - if os.path.exists(args[1]): - os.unlink(args[1]) - output_zip = zipfile.ZipFile(args[1], "w", - compression=zipfile.ZIP_DEFLATED) - else: - temp_zip_file = tempfile.NamedTemporaryFile() - output_zip = zipfile.ZipFile(temp_zip_file, "w", - compression=zipfile.ZIP_DEFLATED) - - if OPTIONS.incremental_source is None: - WriteFullOTAPackage(input_zip, output_zip) - if OPTIONS.package_key is None: - OPTIONS.package_key = OPTIONS.info_dict.get( - "default_system_dev_certificate", - "build/target/product/security/testkey") - common.ZipClose(output_zip) - break - - else: - print "unzipping source target-files..." 
- OPTIONS.source_tmp, source_zip = common.UnzipTemp( - OPTIONS.incremental_source) - OPTIONS.target_info_dict = OPTIONS.info_dict - OPTIONS.source_info_dict = common.LoadInfoDict(source_zip) - if "selinux_fc" in OPTIONS.source_info_dict: - OPTIONS.source_info_dict["selinux_fc"] = os.path.join( - OPTIONS.source_tmp, "BOOT", "RAMDISK", "file_contexts") - if OPTIONS.package_key is None: - OPTIONS.package_key = OPTIONS.source_info_dict.get( - "default_system_dev_certificate", - "build/target/product/security/testkey") - if OPTIONS.verbose: - print "--- source info ---" - common.DumpInfoDict(OPTIONS.source_info_dict) - try: - WriteIncrementalOTAPackage(input_zip, source_zip, output_zip) - common.ZipClose(output_zip) - break - except ValueError: - if not OPTIONS.fallback_to_full: - raise - print "--- failed to build incremental; falling back to full ---" - OPTIONS.incremental_source = None - common.ZipClose(output_zip) - - if not OPTIONS.no_signing: - SignOutput(temp_zip_file.name, args[1]) - temp_zip_file.close() - - print "done." - - -if __name__ == '__main__': - try: - common.CloseInheritedPipes() - main(sys.argv[1:]) - except common.ExternalError as e: - print - print " ERROR: %s" % (e,) - print - sys.exit(1) - finally: - common.Cleanup() diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files new file mode 120000 index 000000000..6755a902f --- /dev/null +++ b/tools/releasetools/ota_from_target_files @@ -0,0 +1 @@ +ota_from_target_files.py \ No newline at end of file diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py new file mode 100755 index 000000000..c8fa8553d --- /dev/null +++ b/tools/releasetools/ota_from_target_files.py @@ -0,0 +1,1753 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Given a target-files zipfile, produces an OTA package that installs +that build. An incremental OTA is produced if -i is given, otherwise +a full OTA is produced. + +Usage: ota_from_target_files [flags] input_target_files output_ota_package + + --board_config + Deprecated. + + -k (--package_key) Key to use to sign the package (default is + the value of default_system_dev_certificate from the input + target-files's META/misc_info.txt, or + "build/target/product/security/testkey" if that value is not + specified). + + For incremental OTAs, the default value is based on the source + target-file, not the target build. + + -i (--incremental_from) + Generate an incremental OTA using the given target-files zip as + the starting build. + + --full_radio + When generating an incremental OTA, always include a full copy of + radio image. This option is only meaningful when -i is specified, + because a full radio is always included in a full OTA if applicable. + + --full_bootloader + When generating an incremental OTA, always include a full copy of + bootloader image. This option is only meaningful when -i is specified, + because a full bootloader is always included in a full OTA if applicable. + + -v (--verify) + Remount and verify the checksums of the files written to the + system and vendor (if used) partitions. Incremental builds only. + + -o (--oem_settings) + Use the file to specify the expected OEM-specific properties + on the OEM partition of the intended device. 
+ + -w (--wipe_user_data) + Generate an OTA package that will wipe the user data partition + when installed. + + -n (--no_prereq) + Omit the timestamp prereq check normally included at the top of + the build scripts (used for developer OTA packages which + legitimately need to go back and forth). + + -e (--extra_script) + Insert the contents of file at the end of the update script. + + -a (--aslr_mode) + Specify whether to turn on ASLR for the package (on by default). + + -2 (--two_step) + Generate a 'two-step' OTA package, where recovery is updated + first, so that any changes made to the system partition are done + using the new recovery (new kernel, etc.). + + --block + Generate a block-based OTA if possible. Will fall back to a + file-based OTA if the target_files is older and doesn't support + block-based OTAs. + + -b (--binary) + Use the given binary as the update-binary in the output package, + instead of the binary in the build's target_files. Use for + development only. + + -t (--worker_threads) + Specifies the number of worker-threads that will be used when + generating patches for incremental updates (defaults to 3). + + --backup + Enable or disable the execution of backuptool.sh. + Disabled by default. + + --override_device + Override device-specific asserts. Can be a comma-separated list. + + --override_prop + Override build.prop items with custom vendor init. + Enabled when TARGET_UNIFIED_DEVICE is defined in BoardConfig + +""" + +import sys + +if sys.hexversion < 0x02070000: + print >> sys.stderr, "Python 2.7 or newer is required." 
+ sys.exit(1) + +import multiprocessing +import os +import tempfile +import zipfile + +import common +import edify_generator +import sparse_img + +OPTIONS = common.OPTIONS +OPTIONS.package_key = None +OPTIONS.incremental_source = None +OPTIONS.verify = False +OPTIONS.require_verbatim = set() +OPTIONS.prohibit_verbatim = set(("system/build.prop",)) +OPTIONS.patch_threshold = 0.95 +OPTIONS.wipe_user_data = False +OPTIONS.omit_prereq = False +OPTIONS.extra_script = None +OPTIONS.aslr_mode = True +OPTIONS.worker_threads = multiprocessing.cpu_count() // 2 +if OPTIONS.worker_threads == 0: + OPTIONS.worker_threads = 1 +OPTIONS.two_step = False +OPTIONS.no_signing = False +OPTIONS.block_based = False +OPTIONS.updater_binary = None +OPTIONS.oem_source = None +OPTIONS.fallback_to_full = True +OPTIONS.full_radio = False +OPTIONS.full_bootloader = False +OPTIONS.backuptool = False +OPTIONS.override_device = 'auto' +OPTIONS.override_prop = False + +def MostPopularKey(d, default): + """Given a dict, return the key corresponding to the largest + value. Returns 'default' if the dict is empty.""" + x = [(v, k) for (k, v) in d.iteritems()] + if not x: + return default + x.sort() + return x[-1][1] + + +def IsSymlink(info): + """Return true if the zipfile.ZipInfo object passed in represents a + symlink.""" + return (info.external_attr >> 16) & 0o770000 == 0o120000 + +def IsRegular(info): + """Return true if the zipfile.ZipInfo object passed in represents a + symlink.""" + return (info.external_attr >> 28) == 0o10 + +def ClosestFileMatch(src, tgtfiles, existing): + """Returns the closest file match between a source file and list + of potential matches. The exact filename match is preferred, + then the sha1 is searched for, and finally a file with the same + basename is evaluated. 
Rename support in the updater-binary is + required for the latter checks to be used.""" + + result = tgtfiles.get("path:" + src.name) + if result is not None: + return result + + if not OPTIONS.target_info_dict.get("update_rename_support", False): + return None + + if src.size < 1000: + return None + + result = tgtfiles.get("sha1:" + src.sha1) + if result is not None and existing.get(result.name) is None: + return result + result = tgtfiles.get("file:" + src.name.split("/")[-1]) + if result is not None and existing.get(result.name) is None: + return result + return None + +class ItemSet(object): + def __init__(self, partition, fs_config): + self.partition = partition + self.fs_config = fs_config + self.ITEMS = {} + + def Get(self, name, is_dir=False): + if name not in self.ITEMS: + self.ITEMS[name] = Item(self, name, is_dir=is_dir) + return self.ITEMS[name] + + def GetMetadata(self, input_zip): + # The target_files contains a record of what the uid, + # gid, and mode are supposed to be. + output = input_zip.read(self.fs_config) + + for line in output.split("\n"): + if not line: + continue + columns = line.split() + name, uid, gid, mode = columns[:4] + selabel = None + capabilities = None + + # After the first 4 columns, there are a series of key=value + # pairs. Extract out the fields we care about. + for element in columns[4:]: + key, value = element.split("=") + if key == "selabel": + selabel = value + if key == "capabilities": + capabilities = value + + i = self.ITEMS.get(name, None) + if i is not None: + i.uid = int(uid) + i.gid = int(gid) + i.mode = int(mode, 8) + i.selabel = selabel + i.capabilities = capabilities + if i.is_dir: + i.children.sort(key=lambda i: i.name) + + # set metadata for the files generated by this script. 
+ i = self.ITEMS.get("system/recovery-from-boot.p", None) + if i: + i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None + i = self.ITEMS.get("system/etc/install-recovery.sh", None) + if i: + i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None + + +class Item(object): + """Items represent the metadata (user, group, mode) of files and + directories in the system image.""" + def __init__(self, itemset, name, is_dir=False): + self.itemset = itemset + self.name = name + self.uid = None + self.gid = None + self.mode = None + self.selabel = None + self.capabilities = None + self.is_dir = is_dir + self.descendants = None + self.best_subtree = None + + if name: + self.parent = itemset.Get(os.path.dirname(name), is_dir=True) + self.parent.children.append(self) + else: + self.parent = None + if self.is_dir: + self.children = [] + + def Dump(self, indent=0): + if self.uid is not None: + print "%s%s %d %d %o" % ( + " " * indent, self.name, self.uid, self.gid, self.mode) + else: + print "%s%s %s %s %s" % ( + " " * indent, self.name, self.uid, self.gid, self.mode) + if self.is_dir: + print "%s%s" % (" "*indent, self.descendants) + print "%s%s" % (" "*indent, self.best_subtree) + for i in self.children: + i.Dump(indent=indent+1) + + def CountChildMetadata(self): + """Count up the (uid, gid, mode, selabel, capabilities) tuples for + all children and determine the best strategy for using set_perm_recursive + and set_perm to correctly chown/chmod all the files to their desired + values. Recursively calls itself for all descendants. + + Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count} + counting up all descendants of this node. (dmode or fmode may be None.) + Also sets the best_subtree of each directory Item to the (uid, gid, dmode, + fmode, selabel, capabilities) tuple that will match the most descendants of + that Item. 
+ """ + + assert self.is_dir + key = (self.uid, self.gid, self.mode, None, self.selabel, + self.capabilities) + self.descendants = {key: 1} + d = self.descendants + for i in self.children: + if i.is_dir: + for k, v in i.CountChildMetadata().iteritems(): + d[k] = d.get(k, 0) + v + else: + k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities) + d[k] = d.get(k, 0) + 1 + + # Find the (uid, gid, dmode, fmode, selabel, capabilities) + # tuple that matches the most descendants. + + # First, find the (uid, gid) pair that matches the most + # descendants. + ug = {} + for (uid, gid, _, _, _, _), count in d.iteritems(): + ug[(uid, gid)] = ug.get((uid, gid), 0) + count + ug = MostPopularKey(ug, (0, 0)) + + # Now find the dmode, fmode, selabel, and capabilities that match + # the most descendants with that (uid, gid), and choose those. + best_dmode = (0, 0o755) + best_fmode = (0, 0o644) + best_selabel = (0, None) + best_capabilities = (0, None) + for k, count in d.iteritems(): + if k[:2] != ug: + continue + if k[2] is not None and count >= best_dmode[0]: + best_dmode = (count, k[2]) + if k[3] is not None and count >= best_fmode[0]: + best_fmode = (count, k[3]) + if k[4] is not None and count >= best_selabel[0]: + best_selabel = (count, k[4]) + if k[5] is not None and count >= best_capabilities[0]: + best_capabilities = (count, k[5]) + self.best_subtree = ug + ( + best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1]) + + return d + + def SetPermissions(self, script): + """Append set_perm/set_perm_recursive commands to 'script' to + set all permissions, users, and groups for the tree of files + rooted at 'self'.""" + + self.CountChildMetadata() + + def recurse(item, current): + # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple + # that the current item (and all its children) have already been set to. + # We only need to issue set_perm/set_perm_recursive commands if we're + # supposed to be something different. 
+ if item.is_dir: + if current != item.best_subtree: + script.SetPermissionsRecursive("/"+item.name, *item.best_subtree) + current = item.best_subtree + + if item.uid != current[0] or item.gid != current[1] or \ + item.mode != current[2] or item.selabel != current[4] or \ + item.capabilities != current[5]: + script.SetPermissions("/"+item.name, item.uid, item.gid, + item.mode, item.selabel, item.capabilities) + + for i in item.children: + recurse(i, current) + else: + if item.uid != current[0] or item.gid != current[1] or \ + item.mode != current[3] or item.selabel != current[4] or \ + item.capabilities != current[5]: + script.SetPermissions("/"+item.name, item.uid, item.gid, + item.mode, item.selabel, item.capabilities) + + recurse(self, (-1, -1, -1, -1, None, None)) + + +def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None): + """Copies files for the partition in the input zip to the output + zip. Populates the Item class with their metadata, and returns a + list of symlinks. output_zip may be None, in which case the copy is + skipped (but the other side effects still happen). substitute is an + optional dict of {output filename: contents} to be output instead of + certain input files. 
+ """ + + symlinks = [] + + partition = itemset.partition + + for info in input_zip.infolist(): + prefix = partition.upper() + "/" + if info.filename.startswith(prefix): + basefilename = info.filename[len(prefix):] + if IsSymlink(info): + symlinks.append((input_zip.read(info.filename), + "/" + partition + "/" + basefilename)) + else: + import copy + info2 = copy.copy(info) + fn = info2.filename = partition + "/" + basefilename + if substitute and fn in substitute and substitute[fn] is None: + continue + if output_zip is not None: + if substitute and fn in substitute: + data = substitute[fn] + else: + data = input_zip.read(info.filename) + common.ZipWriteStr(output_zip, info2, data) + if fn.endswith("/"): + itemset.Get(fn[:-1], is_dir=True) + else: + itemset.Get(fn) + + symlinks.sort() + return symlinks + + +def SignOutput(temp_zip_name, output_zip_name): + key_passwords = common.GetKeyPasswords([OPTIONS.package_key]) + pw = key_passwords[OPTIONS.package_key] + + common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw, + whole_file=True) + + +def AppendAssertions(script, info_dict, oem_dict=None): + oem_props = info_dict.get("oem_fingerprint_properties") + if oem_props is None or len(oem_props) == 0: + if OPTIONS.override_device == "auto": + device = GetBuildProp("ro.product.device", info_dict) + else: + device = OPTIONS.override_device + script.AssertDevice(device) + else: + if oem_dict is None: + raise common.ExternalError( + "No OEM file provided to answer expected assertions") + for prop in oem_props.split(): + if oem_dict.get(prop) is None: + raise common.ExternalError( + "The OEM file is missing the property %s" % prop) + script.AssertOemProperty(prop, oem_dict.get(prop)) + + +def HasRecoveryPatch(target_files_zip): + try: + target_files_zip.getinfo("SYSTEM/recovery-from-boot.p") + return True + except KeyError: + return False + +def HasVendorPartition(target_files_zip): + try: + target_files_zip.getinfo("VENDOR/") + return True + except 
KeyError: + return False + +def GetOemProperty(name, oem_props, oem_dict, info_dict): + if oem_props is not None and name in oem_props: + return oem_dict[name] + return GetBuildProp(name, info_dict) + +def CalculateFingerprint(oem_props, oem_dict, info_dict): + if oem_props is None: + return GetBuildProp("ro.build.fingerprint", info_dict) + return "%s/%s/%s:%s" % ( + GetOemProperty("ro.product.brand", oem_props, oem_dict, info_dict), + GetOemProperty("ro.product.name", oem_props, oem_dict, info_dict), + GetOemProperty("ro.product.device", oem_props, oem_dict, info_dict), + GetBuildProp("ro.build.thumbprint", info_dict)) + + +def GetImage(which, tmpdir, info_dict): + # Return an image object (suitable for passing to BlockImageDiff) + # for the 'which' partition (most be "system" or "vendor"). If a + # prebuilt image and file map are found in tmpdir they are used, + # otherwise they are reconstructed from the individual files. + + assert which in ("system", "vendor") + + path = os.path.join(tmpdir, "IMAGES", which + ".img") + mappath = os.path.join(tmpdir, "IMAGES", which + ".map") + if os.path.exists(path) and os.path.exists(mappath): + print "using %s.img from target-files" % (which,) + # This is a 'new' target-files, which already has the image in it. + + else: + print "building %s.img from target-files" % (which,) + + # This is an 'old' target-files, which does not contain images + # already built. Build them. + + mappath = tempfile.mkstemp()[1] + OPTIONS.tempfiles.append(mappath) + + import add_img_to_target_files + if which == "system": + path = add_img_to_target_files.BuildSystem( + tmpdir, info_dict, block_list=mappath) + elif which == "vendor": + path = add_img_to_target_files.BuildVendor( + tmpdir, info_dict, block_list=mappath) + + # Bug: http://b/20939131 + # In ext4 filesystems, block 0 might be changed even being mounted + # R/O. We add it to clobbered_blocks so that it will be written to the + # target unconditionally. 
Note that they are still part of care_map. + clobbered_blocks = "0" + + return sparse_img.SparseImage(path, mappath, clobbered_blocks) + + +def CopyInstallTools(output_zip): + install_path = os.path.join(OPTIONS.input_tmp, "INSTALL") + for root, subdirs, files in os.walk(install_path): + for f in files: + install_source = os.path.join(root, f) + install_target = os.path.join("install", os.path.relpath(root, install_path), f) + output_zip.write(install_source, install_target) + + +def WriteFullOTAPackage(input_zip, output_zip): + # TODO: how to determine this? We don't know what version it will + # be installed on top of. For now, we expect the API just won't + # change very often. Similarly for fstab, it might have changed + # in the target build. + script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict) + + oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") + recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") + oem_dict = None + if oem_props is not None and len(oem_props) > 0: + if OPTIONS.oem_source is None: + raise common.ExternalError("OEM source required for this build") + script.Mount("/oem", recovery_mount_options) + oem_dict = common.LoadDictionaryFromLines( + open(OPTIONS.oem_source).readlines()) + + if OPTIONS.override_prop: + metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.info_dict), + } + else: + metadata = {"post-build": CalculateFingerprint( + oem_props, oem_dict, OPTIONS.info_dict), + "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, + OPTIONS.info_dict), + "post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.info_dict), + } + + device_specific = common.DeviceSpecificParams( + input_zip=input_zip, + input_version=OPTIONS.info_dict["recovery_api_version"], + output_zip=output_zip, + script=script, + input_tmp=OPTIONS.input_tmp, + metadata=metadata, + info_dict=OPTIONS.source_info_dict) + + has_recovery_patch = HasRecoveryPatch(input_zip) + block_based = 
OPTIONS.block_based and has_recovery_patch + + #if not OPTIONS.omit_prereq: + # ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict) + # ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict) + # script.AssertOlderBuild(ts, ts_text) + + AppendAssertions(script, OPTIONS.info_dict, oem_dict) + device_specific.FullOTA_Assertions() + + # Two-step package strategy (in chronological order, which is *not* + # the order in which the generated script has things): + # + # if stage is not "2/3" or "3/3": + # write recovery image to boot partition + # set stage to "2/3" + # reboot to boot partition and restart recovery + # else if stage is "2/3": + # write recovery image to recovery partition + # set stage to "3/3" + # reboot to recovery partition and restart recovery + # else: + # (stage must be "3/3") + # set stage to "" + # do normal full package installation: + # wipe and install system, boot image, etc. + # set up system to update recovery partition on first boot + # complete script normally + # (allow recovery to mark itself finished and reboot) + + recovery_img = common.GetBootableImage("recovery.img", "recovery.img", + OPTIONS.input_tmp, "RECOVERY") + if OPTIONS.two_step: + if not OPTIONS.info_dict.get("multistage_support", None): + assert False, "two-step packages not supported by this build" + fs = OPTIONS.info_dict["fstab"]["/misc"] + assert fs.fs_type.upper() == "EMMC", \ + "two-step packages only supported on devices with EMMC /misc partitions" + bcb_dev = {"bcb_dev": fs.device} + common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data) + script.AppendExtra(""" +if get_stage("%(bcb_dev)s") == "2/3" then +""" % bcb_dev) + script.WriteRawImage("/recovery", "recovery.img") + script.AppendExtra(""" +set_stage("%(bcb_dev)s", "3/3"); +reboot_now("%(bcb_dev)s", "recovery"); +else if get_stage("%(bcb_dev)s") == "3/3" then +""" % bcb_dev) + + # Dump fingerprints + script.Print("Target: %s" % CalculateFingerprint( + oem_props, oem_dict, OPTIONS.info_dict)) 
+ + script.AppendExtra("ifelse(is_mounted(\"/system\"), unmount(\"/system\"));") + device_specific.FullOTA_InstallBegin() + + CopyInstallTools(output_zip) + script.UnpackPackageDir("install", "/tmp/install") + script.SetPermissionsRecursive("/tmp/install", 0, 0, 0755, 0644, None, None) + script.SetPermissionsRecursive("/tmp/install/bin", 0, 0, 0755, 0755, None, None) + + if OPTIONS.backuptool: + script.Mount("/system") + script.RunBackup("backup") + script.Unmount("/system") + + system_progress = 0.75 + + if OPTIONS.wipe_user_data: + system_progress -= 0.1 + if HasVendorPartition(input_zip): + system_progress -= 0.1 + + script.AppendExtra("if is_mounted(\"/data\") then") + script.ValidateSignatures("data") + script.AppendExtra("else") + script.Mount("/data") + script.ValidateSignatures("data") + script.Unmount("/data") + script.AppendExtra("endif;") + + if "selinux_fc" in OPTIONS.info_dict: + WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip) + + recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") + + system_items = ItemSet("system", "META/filesystem_config.txt") + script.ShowProgress(system_progress, 0) + + if block_based: + # Full OTA is done as an "incremental" against an empty source + # image. This has the effect of writing new data from the package + # to the entire partition, but lets us reuse the updater code that + # writes incrementals to do it. 
+ system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict) + system_tgt.ResetFileMap() + system_diff = common.BlockDifference("system", system_tgt, src=None) + system_diff.WriteScript(script, output_zip) + else: + script.FormatPartition("/system") + script.Mount("/system", recovery_mount_options) + if not has_recovery_patch: + script.UnpackPackageDir("recovery", "/system") + script.UnpackPackageDir("system", "/system") + + symlinks = CopyPartitionFiles(system_items, input_zip, output_zip) + script.MakeSymlinks(symlinks) + + boot_img = common.GetBootableImage("boot.img", "boot.img", + OPTIONS.input_tmp, "BOOT") + + if not block_based: + def output_sink(fn, data): + common.ZipWriteStr(output_zip, "recovery/" + fn, data) + system_items.Get("system/" + fn) + + common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, + recovery_img, boot_img) + + system_items.GetMetadata(input_zip) + system_items.Get("system").SetPermissions(script) + + if HasVendorPartition(input_zip): + vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") + script.ShowProgress(0.1, 0) + + if block_based: + vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict) + vendor_tgt.ResetFileMap() + vendor_diff = common.BlockDifference("vendor", vendor_tgt) + vendor_diff.WriteScript(script, output_zip) + else: + script.FormatPartition("/vendor") + script.Mount("/vendor", recovery_mount_options) + script.UnpackPackageDir("vendor", "/vendor") + + symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip) + script.MakeSymlinks(symlinks) + + vendor_items.GetMetadata(input_zip) + vendor_items.Get("vendor").SetPermissions(script) + + common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict) + common.ZipWriteStr(output_zip, "boot.img", boot_img.data) + + device_specific.FullOTA_PostValidate() + + if OPTIONS.backuptool: + script.ShowProgress(0.02, 10) + if block_based: + script.Mount("/system") + script.RunBackup("restore") + if block_based: + 
script.Unmount("/system") + + script.ShowProgress(0.05, 5) + script.WriteRawImage("/boot", "boot.img") + + script.ShowProgress(0.2, 10) + device_specific.FullOTA_InstallEnd() + + if OPTIONS.extra_script is not None: + script.AppendExtra(OPTIONS.extra_script) + + script.UnmountAll() + + if OPTIONS.wipe_user_data: + script.ShowProgress(0.1, 10) + script.FormatPartition("/data") + + if OPTIONS.two_step: + script.AppendExtra(""" +set_stage("%(bcb_dev)s", ""); +""" % bcb_dev) + script.AppendExtra("else\n") + script.WriteRawImage("/boot", "recovery.img") + script.AppendExtra(""" +set_stage("%(bcb_dev)s", "2/3"); +reboot_now("%(bcb_dev)s", ""); +endif; +endif; +""" % bcb_dev) + script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary) + WriteMetadata(metadata, output_zip) + + common.ZipWriteStr(output_zip, "system/build.prop", + ""+input_zip.read("SYSTEM/build.prop")) + + common.ZipWriteStr(output_zip, "META-INF/org/cyanogenmod/releasekey", + ""+input_zip.read("META/releasekey.txt")) + +def WritePolicyConfig(file_name, output_zip): + common.ZipWrite(output_zip, file_name, os.path.basename(file_name)) + +def WriteMetadata(metadata, output_zip): + common.ZipWriteStr(output_zip, "META-INF/com/android/metadata", + "".join(["%s=%s\n" % kv + for kv in sorted(metadata.iteritems())])) + + +def LoadPartitionFiles(z, partition): + """Load all the files from the given partition in a given target-files + ZipFile, and return a dict of {filename: File object}.""" + out = {} + prefix = partition.upper() + "/" + for info in z.infolist(): + if info.filename.startswith(prefix) and not IsSymlink(info): + basefilename = info.filename[len(prefix):] + fn = partition + "/" + basefilename + data = z.read(info.filename) + out[fn] = common.File(fn, data) + return out + + +def GetBuildProp(prop, info_dict): + """Return the fingerprint of the build of a given target-files info_dict.""" + try: + return info_dict.get("build.prop", {})[prop] + except KeyError: + raise 
common.ExternalError("couldn't find %s in build.prop" % (prop,)) + + +def AddToKnownPaths(filename, known_paths): + if filename[-1] == "/": + return + dirs = filename.split("/")[:-1] + while len(dirs) > 0: + path = "/".join(dirs) + if path in known_paths: + break + known_paths.add(path) + dirs.pop() + + +def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip): + source_version = OPTIONS.source_info_dict["recovery_api_version"] + target_version = OPTIONS.target_info_dict["recovery_api_version"] + + if source_version == 0: + print ("WARNING: generating edify script for a source that " + "can't install it.") + script = edify_generator.EdifyGenerator( + source_version, OPTIONS.target_info_dict, + fstab=OPTIONS.source_info_dict["fstab"]) + + if OPTIONS.override_prop: + metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.target_info_dict), + } + else: + metadata = {"pre-device": GetBuildProp("ro.product.device", + OPTIONS.source_info_dict), + "post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.target_info_dict), + } + + device_specific = common.DeviceSpecificParams( + source_zip=source_zip, + source_version=source_version, + target_zip=target_zip, + target_version=target_version, + output_zip=output_zip, + script=script, + metadata=metadata, + info_dict=OPTIONS.source_info_dict) + + # TODO: Currently this works differently from WriteIncrementalOTAPackage(). + # This function doesn't consider thumbprints when writing + # metadata["pre/post-build"]. One possible reason is that the current + # devices with thumbprints are all using file-based OTAs. Long term we + # should factor out the common parts into a shared one to avoid further + # divergence. 
+ source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict) + target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict) + metadata["pre-build"] = source_fp + metadata["post-build"] = target_fp + + source_boot = common.GetBootableImage( + "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", + OPTIONS.source_info_dict) + target_boot = common.GetBootableImage( + "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") + updating_boot = (not OPTIONS.two_step and + (source_boot.data != target_boot.data)) + + target_recovery = common.GetBootableImage( + "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") + + system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict) + system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict) + + blockimgdiff_version = 1 + if OPTIONS.info_dict: + blockimgdiff_version = max( + int(i) for i in + OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) + + system_diff = common.BlockDifference("system", system_tgt, system_src, + version=blockimgdiff_version) + + if HasVendorPartition(target_zip): + if not HasVendorPartition(source_zip): + raise RuntimeError("can't generate incremental that adds /vendor") + vendor_src = GetImage("vendor", OPTIONS.source_tmp, + OPTIONS.source_info_dict) + vendor_tgt = GetImage("vendor", OPTIONS.target_tmp, + OPTIONS.target_info_dict) + vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src, + version=blockimgdiff_version) + else: + vendor_diff = None + + oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties") + recovery_mount_options = OPTIONS.source_info_dict.get( + "recovery_mount_options") + oem_dict = None + if oem_props is not None and len(oem_props) > 0: + if OPTIONS.oem_source is None: + raise common.ExternalError("OEM source required for this build") + script.Mount("/oem", recovery_mount_options) + oem_dict = common.LoadDictionaryFromLines( + 
open(OPTIONS.oem_source).readlines()) + + AppendAssertions(script, OPTIONS.target_info_dict, oem_dict) + device_specific.IncrementalOTA_Assertions() + + # Two-step incremental package strategy (in chronological order, + # which is *not* the order in which the generated script has + # things): + # + # if stage is not "2/3" or "3/3": + # do verification on current system + # write recovery image to boot partition + # set stage to "2/3" + # reboot to boot partition and restart recovery + # else if stage is "2/3": + # write recovery image to recovery partition + # set stage to "3/3" + # reboot to recovery partition and restart recovery + # else: + # (stage must be "3/3") + # perform update: + # patch system files, etc. + # force full install of new boot image + # set up system to update recovery partition on first boot + # complete script normally + # (allow recovery to mark itself finished and reboot) + + if OPTIONS.two_step: + if not OPTIONS.source_info_dict.get("multistage_support", None): + assert False, "two-step packages not supported by this build" + fs = OPTIONS.source_info_dict["fstab"]["/misc"] + assert fs.fs_type.upper() == "EMMC", \ + "two-step packages only supported on devices with EMMC /misc partitions" + bcb_dev = {"bcb_dev": fs.device} + common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) + script.AppendExtra(""" +if get_stage("%(bcb_dev)s") == "2/3" then +""" % bcb_dev) + script.AppendExtra("sleep(20);\n") + script.WriteRawImage("/recovery", "recovery.img") + script.AppendExtra(""" +set_stage("%(bcb_dev)s", "3/3"); +reboot_now("%(bcb_dev)s", "recovery"); +else if get_stage("%(bcb_dev)s") != "3/3" then +""" % bcb_dev) + + # Dump fingerprints + script.Print("Source: %s" % CalculateFingerprint( + oem_props, oem_dict, OPTIONS.source_info_dict)) + script.Print("Target: %s" % CalculateFingerprint( + oem_props, oem_dict, OPTIONS.target_info_dict)) + + script.Print("Verifying current system...") + + 
device_specific.IncrementalOTA_VerifyBegin() + + if oem_props is None: + # When blockimgdiff version is less than 3 (non-resumable block-based OTA), + # patching on a device that's already on the target build will damage the + # system. Because operations like move don't check the block state, they + # always apply the changes unconditionally. + if blockimgdiff_version <= 2: + script.AssertSomeFingerprint(source_fp) + else: + script.AssertSomeFingerprint(source_fp, target_fp) + else: + if blockimgdiff_version <= 2: + script.AssertSomeThumbprint( + GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) + else: + script.AssertSomeThumbprint( + GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), + GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) + + if updating_boot: + boot_type, boot_device = common.GetTypeAndDevice( + "/boot", OPTIONS.source_info_dict) + d = common.Difference(target_boot, source_boot) + _, _, d = d.ComputePatch() + if d is None: + include_full_boot = True + common.ZipWriteStr(output_zip, "boot.img", target_boot.data) + else: + include_full_boot = False + + print "boot target: %d source: %d diff: %d" % ( + target_boot.size, source_boot.size, len(d)) + + common.ZipWriteStr(output_zip, "patch/boot.img.p", d) + + script.PatchCheck("%s:%s:%d:%s:%d:%s" % + (boot_type, boot_device, + source_boot.size, source_boot.sha1, + target_boot.size, target_boot.sha1)) + + device_specific.IncrementalOTA_VerifyEnd() + + if OPTIONS.two_step: + script.WriteRawImage("/boot", "recovery.img") + script.AppendExtra(""" +set_stage("%(bcb_dev)s", "2/3"); +reboot_now("%(bcb_dev)s", ""); +else +""" % bcb_dev) + + # Verify the existing partitions. 
+ system_diff.WriteVerifyScript(script) + if vendor_diff: + vendor_diff.WriteVerifyScript(script) + + script.Comment("---- start making changes here ----") + + device_specific.IncrementalOTA_InstallBegin() + + system_diff.WriteScript(script, output_zip, + progress=0.8 if vendor_diff else 0.9) + if vendor_diff: + vendor_diff.WriteScript(script, output_zip, progress=0.1) + + if OPTIONS.two_step: + common.ZipWriteStr(output_zip, "boot.img", target_boot.data) + script.WriteRawImage("/boot", "boot.img") + print "writing full boot image (forced by two-step mode)" + + if not OPTIONS.two_step: + if updating_boot: + if include_full_boot: + print "boot image changed; including full." + script.Print("Installing boot image...") + script.WriteRawImage("/boot", "boot.img") + else: + # Produce the boot image by applying a patch to the current + # contents of the boot partition, and write it back to the + # partition. + print "boot image changed; including patch." + script.Print("Patching boot image...") + script.ShowProgress(0.1, 10) + script.ApplyPatch("%s:%s:%d:%s:%d:%s" + % (boot_type, boot_device, + source_boot.size, source_boot.sha1, + target_boot.size, target_boot.sha1), + "-", + target_boot.size, target_boot.sha1, + source_boot.sha1, "patch/boot.img.p") + else: + print "boot image unchanged; skipping." + + # Do device-specific installation (eg, write radio image). 
+ device_specific.IncrementalOTA_InstallEnd() + + if OPTIONS.extra_script is not None: + script.AppendExtra(OPTIONS.extra_script) + + if OPTIONS.wipe_user_data: + script.Print("Erasing user data...") + script.FormatPartition("/data") + + if OPTIONS.two_step: + script.AppendExtra(""" +set_stage("%(bcb_dev)s", ""); +endif; +endif; +""" % bcb_dev) + + script.SetProgress(1) + script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) + WriteMetadata(metadata, output_zip) + + +class FileDifference(object): + def __init__(self, partition, source_zip, target_zip, output_zip): + self.deferred_patch_list = None + print "Loading target..." + self.target_data = target_data = LoadPartitionFiles(target_zip, partition) + print "Loading source..." + self.source_data = source_data = LoadPartitionFiles(source_zip, partition) + + self.verbatim_targets = verbatim_targets = [] + self.patch_list = patch_list = [] + diffs = [] + self.renames = renames = {} + known_paths = set() + largest_source_size = 0 + + matching_file_cache = {} + for fn, sf in source_data.items(): + assert fn == sf.name + matching_file_cache["path:" + fn] = sf + if fn in target_data.keys(): + AddToKnownPaths(fn, known_paths) + # Only allow eligibility for filename/sha matching + # if there isn't a perfect path match. 
+ if target_data.get(sf.name) is None: + matching_file_cache["file:" + fn.split("/")[-1]] = sf + matching_file_cache["sha:" + sf.sha1] = sf + + for fn in sorted(target_data.keys()): + tf = target_data[fn] + assert fn == tf.name + sf = ClosestFileMatch(tf, matching_file_cache, renames) + if sf is not None and sf.name != tf.name: + print "File has moved from " + sf.name + " to " + tf.name + renames[sf.name] = tf + + if sf is None or fn in OPTIONS.require_verbatim: + # This file should be included verbatim + if fn in OPTIONS.prohibit_verbatim: + raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,)) + print "send", fn, "verbatim" + tf.AddToZip(output_zip) + verbatim_targets.append((fn, tf.size, tf.sha1)) + if fn in target_data.keys(): + AddToKnownPaths(fn, known_paths) + elif tf.sha1 != sf.sha1: + # File is different; consider sending as a patch + diffs.append(common.Difference(tf, sf)) + else: + # Target file data identical to source (may still be renamed) + pass + + common.ComputeDifferences(diffs) + + for diff in diffs: + tf, sf, d = diff.GetPatch() + path = "/".join(tf.name.split("/")[:-1]) + if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \ + path not in known_paths: + # patch is almost as big as the file; don't bother patching + # or a patch + rename cannot take place due to the target + # directory not existing + tf.AddToZip(output_zip) + verbatim_targets.append((tf.name, tf.size, tf.sha1)) + if sf.name in renames: + del renames[sf.name] + AddToKnownPaths(tf.name, known_paths) + else: + common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d) + patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest())) + largest_source_size = max(largest_source_size, sf.size) + + self.largest_source_size = largest_source_size + + def EmitVerification(self, script): + so_far = 0 + for tf, sf, _, _ in self.patch_list: + if tf.name != sf.name: + script.SkipNextActionIfTargetExists(tf.name, tf.sha1) + script.PatchCheck("/"+sf.name, tf.sha1, 
sf.sha1) + so_far += sf.size + return so_far + + def EmitExplicitTargetVerification(self, script): + for fn, _, sha1 in self.verbatim_targets: + if fn[-1] != "/": + script.FileCheck("/"+fn, sha1) + for tf, _, _, _ in self.patch_list: + script.FileCheck(tf.name, tf.sha1) + + def RemoveUnneededFiles(self, script, extras=()): + script.DeleteFiles( + ["/" + i[0] for i in self.verbatim_targets] + + ["/" + i for i in sorted(self.source_data) + if i not in self.target_data and i not in self.renames] + + list(extras)) + + def TotalPatchSize(self): + return sum(i[1].size for i in self.patch_list) + + def EmitPatches(self, script, total_patch_size, so_far): + self.deferred_patch_list = deferred_patch_list = [] + for item in self.patch_list: + tf, sf, _, _ = item + if tf.name == "system/build.prop": + deferred_patch_list.append(item) + continue + if sf.name != tf.name: + script.SkipNextActionIfTargetExists(tf.name, tf.sha1) + script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1, + "patch/" + sf.name + ".p") + so_far += tf.size + script.SetProgress(so_far / total_patch_size) + return so_far + + def EmitDeferredPatches(self, script): + for item in self.deferred_patch_list: + tf, sf, _, _ = item + script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, + "patch/" + sf.name + ".p") + script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None) + + def EmitRenames(self, script): + if len(self.renames) > 0: + script.Print("Renaming files...") + for src, tgt in self.renames.iteritems(): + print "Renaming " + src + " to " + tgt.name + script.RenameFile(src, tgt.name) + + +def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip): + target_has_recovery_patch = HasRecoveryPatch(target_zip) + source_has_recovery_patch = HasRecoveryPatch(source_zip) + + if (OPTIONS.block_based and + target_has_recovery_patch and + source_has_recovery_patch): + return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip) + + source_version = 
OPTIONS.source_info_dict["recovery_api_version"] + target_version = OPTIONS.target_info_dict["recovery_api_version"] + + if source_version == 0: + print ("WARNING: generating edify script for a source that " + "can't install it.") + script = edify_generator.EdifyGenerator( + source_version, OPTIONS.target_info_dict, + fstab=OPTIONS.source_info_dict["fstab"]) + + oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") + recovery_mount_options = OPTIONS.source_info_dict.get( + "recovery_mount_options") + oem_dict = None + if oem_props is not None and len(oem_props) > 0: + if OPTIONS.oem_source is None: + raise common.ExternalError("OEM source required for this build") + script.Mount("/oem", recovery_mount_options) + oem_dict = common.LoadDictionaryFromLines( + open(OPTIONS.oem_source).readlines()) + + if OPTIONS.override_prop: + metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.target_info_dict), + } + else: + metadata = {"pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, + OPTIONS.source_info_dict), + "post-timestamp": GetBuildProp("ro.build.date.utc", + OPTIONS.target_info_dict), + } + + device_specific = common.DeviceSpecificParams( + source_zip=source_zip, + source_version=source_version, + target_zip=target_zip, + target_version=target_version, + output_zip=output_zip, + script=script, + metadata=metadata, + info_dict=OPTIONS.info_dict) + + system_diff = FileDifference("system", source_zip, target_zip, output_zip) + script.Mount("/system", recovery_mount_options) + if HasVendorPartition(target_zip): + vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip) + script.Mount("/vendor", recovery_mount_options) + else: + vendor_diff = None + + if not OPTIONS.override_prop: + target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict) + source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict) + + if oem_props is None: + 
script.AssertSomeFingerprint(source_fp, target_fp) + else: + script.AssertSomeThumbprint( + GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), + GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) + + metadata["pre-build"] = source_fp + metadata["post-build"] = target_fp + + source_boot = common.GetBootableImage( + "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", + OPTIONS.source_info_dict) + target_boot = common.GetBootableImage( + "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") + updating_boot = (not OPTIONS.two_step and + (source_boot.data != target_boot.data)) + + source_recovery = common.GetBootableImage( + "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY", + OPTIONS.source_info_dict) + target_recovery = common.GetBootableImage( + "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") + updating_recovery = (source_recovery.data != target_recovery.data) + + # Here's how we divide up the progress bar: + # 0.1 for verifying the start state (PatchCheck calls) + # 0.8 for applying patches (ApplyPatch calls) + # 0.1 for unpacking verbatim files, symlinking, and doing the + # device-specific commands. + + AppendAssertions(script, OPTIONS.target_info_dict, oem_dict) + device_specific.IncrementalOTA_Assertions() + + # Two-step incremental package strategy (in chronological order, + # which is *not* the order in which the generated script has + # things): + # + # if stage is not "2/3" or "3/3": + # do verification on current system + # write recovery image to boot partition + # set stage to "2/3" + # reboot to boot partition and restart recovery + # else if stage is "2/3": + # write recovery image to recovery partition + # set stage to "3/3" + # reboot to recovery partition and restart recovery + # else: + # (stage must be "3/3") + # perform update: + # patch system files, etc. 
+ # force full install of new boot image + # set up system to update recovery partition on first boot + # complete script normally + # (allow recovery to mark itself finished and reboot) + + if OPTIONS.two_step: + if not OPTIONS.source_info_dict.get("multistage_support", None): + assert False, "two-step packages not supported by this build" + fs = OPTIONS.source_info_dict["fstab"]["/misc"] + assert fs.fs_type.upper() == "EMMC", \ + "two-step packages only supported on devices with EMMC /misc partitions" + bcb_dev = {"bcb_dev": fs.device} + common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) + script.AppendExtra(""" +if get_stage("%(bcb_dev)s") == "2/3" then +""" % bcb_dev) + script.AppendExtra("sleep(20);\n") + script.WriteRawImage("/recovery", "recovery.img") + script.AppendExtra(""" +set_stage("%(bcb_dev)s", "3/3"); +reboot_now("%(bcb_dev)s", "recovery"); +else if get_stage("%(bcb_dev)s") != "3/3" then +""" % bcb_dev) + + # Dump fingerprints + script.Print("Source: %s" % (source_fp,)) + script.Print("Target: %s" % (target_fp,)) + + script.Print("Verifying current system...") + + device_specific.IncrementalOTA_VerifyBegin() + + script.ShowProgress(0.1, 0) + so_far = system_diff.EmitVerification(script) + if vendor_diff: + so_far += vendor_diff.EmitVerification(script) + + if updating_boot: + d = common.Difference(target_boot, source_boot) + _, _, d = d.ComputePatch() + print "boot target: %d source: %d diff: %d" % ( + target_boot.size, source_boot.size, len(d)) + + common.ZipWriteStr(output_zip, "patch/boot.img.p", d) + + boot_type, boot_device = common.GetTypeAndDevice( + "/boot", OPTIONS.source_info_dict) + + script.PatchCheck("%s:%s:%d:%s:%d:%s" % + (boot_type, boot_device, + source_boot.size, source_boot.sha1, + target_boot.size, target_boot.sha1)) + so_far += source_boot.size + + size = [] + if system_diff.patch_list: + size.append(system_diff.largest_source_size) + if vendor_diff: + if vendor_diff.patch_list: + 
size.append(vendor_diff.largest_source_size) + if size or updating_recovery or updating_boot: + script.CacheFreeSpaceCheck(max(size)) + + device_specific.IncrementalOTA_VerifyEnd() + + if OPTIONS.two_step: + script.WriteRawImage("/boot", "recovery.img") + script.AppendExtra(""" +set_stage("%(bcb_dev)s", "2/3"); +reboot_now("%(bcb_dev)s", ""); +else +""" % bcb_dev) + + script.Comment("---- start making changes here ----") + + device_specific.IncrementalOTA_InstallBegin() + + if OPTIONS.two_step: + common.ZipWriteStr(output_zip, "boot.img", target_boot.data) + script.WriteRawImage("/boot", "boot.img") + print "writing full boot image (forced by two-step mode)" + + script.Print("Removing unneeded files...") + system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",)) + if vendor_diff: + vendor_diff.RemoveUnneededFiles(script) + + script.ShowProgress(0.8, 0) + total_patch_size = 1.0 + system_diff.TotalPatchSize() + if vendor_diff: + total_patch_size += vendor_diff.TotalPatchSize() + if updating_boot: + total_patch_size += target_boot.size + + script.Print("Patching system files...") + so_far = system_diff.EmitPatches(script, total_patch_size, 0) + if vendor_diff: + script.Print("Patching vendor files...") + so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far) + + if not OPTIONS.two_step: + if updating_boot: + # Produce the boot image by applying a patch to the current + # contents of the boot partition, and write it back to the + # partition. + script.Print("Patching boot image...") + script.ApplyPatch("%s:%s:%d:%s:%d:%s" + % (boot_type, boot_device, + source_boot.size, source_boot.sha1, + target_boot.size, target_boot.sha1), + "-", + target_boot.size, target_boot.sha1, + source_boot.sha1, "patch/boot.img.p") + so_far += target_boot.size + script.SetProgress(so_far / total_patch_size) + print "boot image changed; including." + else: + print "boot image unchanged; skipping." 
+ + system_items = ItemSet("system", "META/filesystem_config.txt") + if vendor_diff: + vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") + + if updating_recovery: + # Recovery is generated as a patch using both the boot image + # (which contains the same linux kernel as recovery) and the file + # /system/etc/recovery-resource.dat (which contains all the images + # used in the recovery UI) as sources. This lets us minimize the + # size of the patch, which must be included in every OTA package. + # + # For older builds where recovery-resource.dat is not present, we + # use only the boot image as the source. + + if not target_has_recovery_patch: + def output_sink(fn, data): + common.ZipWriteStr(output_zip, "recovery/" + fn, data) + system_items.Get("system/" + fn) + + common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink, + target_recovery, target_boot) + script.DeleteFiles(["/system/recovery-from-boot.p", + "/system/etc/install-recovery.sh"]) + print "recovery image changed; including as patch from boot." + else: + print "recovery image unchanged; skipping." + + script.ShowProgress(0.1, 10) + + target_symlinks = CopyPartitionFiles(system_items, target_zip, None) + if vendor_diff: + target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None)) + + temp_script = script.MakeTemporary() + system_items.GetMetadata(target_zip) + system_items.Get("system").SetPermissions(temp_script) + if vendor_diff: + vendor_items.GetMetadata(target_zip) + vendor_items.Get("vendor").SetPermissions(temp_script) + + # Note that this call will mess up the trees of Items, so make sure + # we're done with them. 
+ source_symlinks = CopyPartitionFiles(system_items, source_zip, None) + if vendor_diff: + source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None)) + + target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks]) + source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks]) + + # Delete all the symlinks in source that aren't in target. This + # needs to happen before verbatim files are unpacked, in case a + # symlink in the source is replaced by a real file in the target. + + # If a symlink in the source will be replaced by a regular file, we cannot + # delete the symlink/file in case the package gets applied again. For such + # a symlink, we prepend a sha1_check() to detect if it has been updated. + # (Bug: 23646151) + replaced_symlinks = dict() + if system_diff: + for i in system_diff.verbatim_targets: + replaced_symlinks["/%s" % (i[0],)] = i[2] + if vendor_diff: + for i in vendor_diff.verbatim_targets: + replaced_symlinks["/%s" % (i[0],)] = i[2] + + if system_diff: + for tf in system_diff.renames.values(): + replaced_symlinks["/%s" % (tf.name,)] = tf.sha1 + if vendor_diff: + for tf in vendor_diff.renames.values(): + replaced_symlinks["/%s" % (tf.name,)] = tf.sha1 + + always_delete = [] + may_delete = [] + for dest, link in source_symlinks: + if link not in target_symlinks_d: + if link in replaced_symlinks: + may_delete.append((link, replaced_symlinks[link])) + else: + always_delete.append(link) + script.DeleteFiles(always_delete) + script.DeleteFilesIfNotMatching(may_delete) + + if system_diff.verbatim_targets: + script.Print("Unpacking new system files...") + script.UnpackPackageDir("system", "/system") + if vendor_diff and vendor_diff.verbatim_targets: + script.Print("Unpacking new vendor files...") + script.UnpackPackageDir("vendor", "/vendor") + + if updating_recovery and not target_has_recovery_patch: + script.Print("Unpacking new recovery...") + script.UnpackPackageDir("recovery", "/system") + + system_diff.EmitRenames(script) 
+ if vendor_diff: + vendor_diff.EmitRenames(script) + + script.Print("Symlinks and permissions...") + + # Create all the symlinks that don't already exist, or point to + # somewhere different than what we want. Delete each symlink before + # creating it, since the 'symlink' command won't overwrite. + to_create = [] + for dest, link in target_symlinks: + if link in source_symlinks_d: + if dest != source_symlinks_d[link]: + to_create.append((dest, link)) + else: + to_create.append((dest, link)) + script.DeleteFiles([i[1] for i in to_create]) + script.MakeSymlinks(to_create) + + # Now that the symlinks are created, we can set all the + # permissions. + script.AppendScript(temp_script) + + # Do device-specific installation (eg, write radio image). + device_specific.IncrementalOTA_InstallEnd() + + if OPTIONS.extra_script is not None: + script.AppendExtra(OPTIONS.extra_script) + + # Patch the build.prop file last, so if something fails but the + # device can still come up, it appears to be the old build and will + # get set the OTA package again to retry. 
+ script.Print("Patching remaining system files...") + system_diff.EmitDeferredPatches(script) + + if OPTIONS.wipe_user_data: + script.Print("Erasing user data...") + script.FormatPartition("/data") + + if OPTIONS.two_step: + script.AppendExtra(""" +set_stage("%(bcb_dev)s", ""); +endif; +endif; +""" % bcb_dev) + + if OPTIONS.verify and system_diff: + script.Print("Remounting and verifying system partition files...") + script.Unmount("/system") + script.Mount("/system") + system_diff.EmitExplicitTargetVerification(script) + + if OPTIONS.verify and vendor_diff: + script.Print("Remounting and verifying vendor partition files...") + script.Unmount("/vendor") + script.Mount("/vendor") + vendor_diff.EmitExplicitTargetVerification(script) + script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) + + WriteMetadata(metadata, output_zip) + +def main(argv): + + def option_handler(o, a): + if o == "--board_config": + pass # deprecated + elif o in ("-k", "--package_key"): + OPTIONS.package_key = a + elif o in ("-i", "--incremental_from"): + OPTIONS.incremental_source = a + elif o == "--full_radio": + OPTIONS.full_radio = True + elif o == "--full_bootloader": + OPTIONS.full_bootloader = True + elif o in ("-w", "--wipe_user_data"): + OPTIONS.wipe_user_data = True + elif o in ("-n", "--no_prereq"): + OPTIONS.omit_prereq = True + elif o in ("-o", "--oem_settings"): + OPTIONS.oem_source = a + elif o in ("-e", "--extra_script"): + OPTIONS.extra_script = a + elif o in ("-a", "--aslr_mode"): + if a in ("on", "On", "true", "True", "yes", "Yes"): + OPTIONS.aslr_mode = True + else: + OPTIONS.aslr_mode = False + elif o in ("-t", "--worker_threads"): + if a.isdigit(): + OPTIONS.worker_threads = int(a) + else: + raise ValueError("Cannot parse value %r for option %r - only " + "integers are allowed." 
% (a, o)) + elif o in ("-2", "--two_step"): + OPTIONS.two_step = True + elif o == "--no_signing": + OPTIONS.no_signing = True + elif o == "--verify": + OPTIONS.verify = True + elif o == "--block": + OPTIONS.block_based = True + elif o in ("-b", "--binary"): + OPTIONS.updater_binary = a + elif o in ("--no_fallback_to_full",): + OPTIONS.fallback_to_full = False + elif o in ("--backup"): + OPTIONS.backuptool = bool(a.lower() == 'true') + elif o in ("--override_device"): + OPTIONS.override_device = a + elif o in ("--override_prop"): + OPTIONS.override_prop = bool(a.lower() == 'true') + else: + return False + return True + + args = common.ParseOptions(argv, __doc__, + extra_opts="b:k:i:d:wne:t:a:2o:", + extra_long_opts=[ + "board_config=", + "package_key=", + "incremental_from=", + "full_radio", + "full_bootloader", + "wipe_user_data", + "no_prereq", + "extra_script=", + "worker_threads=", + "aslr_mode=", + "two_step", + "no_signing", + "block", + "binary=", + "oem_settings=", + "verify", + "no_fallback_to_full", + "backup=", + "override_device=", + "override_prop=" + ], extra_option_handler=option_handler) + + if len(args) != 2: + common.Usage(__doc__) + sys.exit(1) + + if OPTIONS.extra_script is not None: + OPTIONS.extra_script = open(OPTIONS.extra_script).read() + + print "unzipping target target-files..." + OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) + + OPTIONS.target_tmp = OPTIONS.input_tmp + OPTIONS.info_dict = common.LoadInfoDict(input_zip) + + # If this image was originally labelled with SELinux contexts, make sure we + # also apply the labels in our new image. During building, the "file_contexts" + # is in the out/ directory tree, but for repacking from target-files.zip it's + # in the root directory of the ramdisk. 
+ if "selinux_fc" in OPTIONS.info_dict: + OPTIONS.info_dict["selinux_fc"] = os.path.join( + OPTIONS.input_tmp, "BOOT", "RAMDISK", "file_contexts") + + if OPTIONS.verbose: + print "--- target info ---" + common.DumpInfoDict(OPTIONS.info_dict) + + # If the caller explicitly specified the device-specific extensions + # path via -s/--device_specific, use that. Otherwise, use + # META/releasetools.py if it is present in the target target_files. + # Otherwise, take the path of the file from 'tool_extensions' in the + # info dict and look for that in the local filesystem, relative to + # the current directory. + + if OPTIONS.device_specific is None: + from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py") + if os.path.exists(from_input): + print "(using device-specific extensions from target_files)" + OPTIONS.device_specific = from_input + else: + OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None) + + if OPTIONS.device_specific is not None: + OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific) + + while True: + + if OPTIONS.no_signing: + if os.path.exists(args[1]): + os.unlink(args[1]) + output_zip = zipfile.ZipFile(args[1], "w", + compression=zipfile.ZIP_DEFLATED) + else: + temp_zip_file = tempfile.NamedTemporaryFile() + output_zip = zipfile.ZipFile(temp_zip_file, "w", + compression=zipfile.ZIP_DEFLATED) + + if OPTIONS.incremental_source is None: + WriteFullOTAPackage(input_zip, output_zip) + if OPTIONS.package_key is None: + OPTIONS.package_key = OPTIONS.info_dict.get( + "default_system_dev_certificate", + "build/target/product/security/testkey") + common.ZipClose(output_zip) + break + + else: + print "unzipping source target-files..." 
+ OPTIONS.source_tmp, source_zip = common.UnzipTemp( + OPTIONS.incremental_source) + OPTIONS.target_info_dict = OPTIONS.info_dict + OPTIONS.source_info_dict = common.LoadInfoDict(source_zip) + if "selinux_fc" in OPTIONS.source_info_dict: + OPTIONS.source_info_dict["selinux_fc"] = os.path.join( + OPTIONS.source_tmp, "BOOT", "RAMDISK", "file_contexts") + if OPTIONS.package_key is None: + OPTIONS.package_key = OPTIONS.source_info_dict.get( + "default_system_dev_certificate", + "build/target/product/security/testkey") + if OPTIONS.verbose: + print "--- source info ---" + common.DumpInfoDict(OPTIONS.source_info_dict) + try: + WriteIncrementalOTAPackage(input_zip, source_zip, output_zip) + common.ZipClose(output_zip) + break + except ValueError: + if not OPTIONS.fallback_to_full: + raise + print "--- failed to build incremental; falling back to full ---" + OPTIONS.incremental_source = None + common.ZipClose(output_zip) + + if not OPTIONS.no_signing: + SignOutput(temp_zip_file.name, args[1]) + temp_zip_file.close() + + print "done." 
+ + +if __name__ == '__main__': + try: + common.CloseInheritedPipes() + main(sys.argv[1:]) + except common.ExternalError as e: + print + print " ERROR: %s" % (e,) + print + sys.exit(1) + finally: + common.Cleanup() From b27e12378f4c4e52a53f91c4df72ddd1fd687472 Mon Sep 17 00:00:00 2001 From: Alvin Francis Date: Mon, 1 Dec 2014 13:13:40 -0400 Subject: [PATCH 214/309] Fix libelf path for mm Fixes kernel build on darwin Updated for cm-13.0 Change-Id: Ic6cdb1734d7b865491e8e61403d32db4acafe514 Signed-off-by: Alvin Francis --- core/tasks/kernel.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 6c00bf786..293b86703 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -207,7 +207,7 @@ define clean-module-folder endef ifeq ($(HOST_OS),darwin) - MAKE_FLAGS += C_INCLUDE_PATH=$(ANDROID_BUILD_TOP)/external/elfutils/0.153/libelf/ + MAKE_FLAGS += C_INCLUDE_PATH=$(ANDROID_BUILD_TOP)/external/elfutils/src/libelf/ endif ifeq ($(TARGET_KERNEL_MODULES),) From 5a7d4a32dba15c170c5caebcb9fee5dadfc0c4cd Mon Sep 17 00:00:00 2001 From: Ameya Thakur Date: Wed, 24 Oct 2012 19:31:42 -0700 Subject: [PATCH 215/309] Fix case where boot/recovery.img were being built with wrong params. The boot and recovery images now get built using the same params during ota package generation as during a normal build. 
Change-Id: I93d46e11a4245288f0e87c87a2e4bf45ac5aff69 --- core/Makefile | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/core/Makefile b/core/Makefile index b97f209d6..857ca852a 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1629,6 +1629,15 @@ ifdef BOARD_KERNEL_BASE endif ifdef BOARD_KERNEL_PAGESIZE $(hide) echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/RECOVERY/pagesize +endif +ifdef BOARD_KERNEL_TAGS_ADDR + $(hide) echo "$(BOARD_KERNEL_TAGS_ADDR)" > $(zip_root)/RECOVERY/tagsaddr +endif +ifdef BOARD_RAMDISK_OFFSET + $(hide) echo "$(BOARD_RAMDISK_OFFSET)" > $(zip_root)/RECOVERY/ramdisk_offset +endif +ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) + $(hide) echo "$(INSTALLED_DTIMAGE_TARGET)" > $(zip_root)/RECOVERY/dt_args endif @# Components of the boot image $(hide) mkdir -p $(zip_root)/BOOT @@ -1654,6 +1663,15 @@ endif ifdef BOARD_KERNEL_PAGESIZE $(hide) echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/BOOT/pagesize endif +ifdef BOARD_KERNEL_TAGS_ADDR + $(hide) echo "$(BOARD_KERNEL_TAGS_ADDR)" > $(zip_root)/BOOT/tagsaddr +endif +ifdef BOARD_RAMDISK_OFFSET + $(hide) echo "$(BOARD_RAMDISK_OFFSET)" > $(zip_root)/BOOT/ramdisk_offset +endif +ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) + $(hide) echo "$(INSTALLED_DTIMAGE_TARGET)" > $(zip_root)/BOOT/dt_args +endif ifdef ZIP_SAVE_UBOOTIMG_ARGS $(hide) echo "$(ZIP_SAVE_UBOOTIMG_ARGS)" > $(zip_root)/BOOT/ubootargs endif From a412a531e116bce50baf23ea18610a279a0289fd Mon Sep 17 00:00:00 2001 From: Rashed Abdel-Tawab Date: Tue, 24 Feb 2015 23:00:38 -0500 Subject: [PATCH 216/309] qcom_utils: Add msm8992 and msm8994 * The Snapdragon 808 will be releasing as the msm8992 so reference it as such. * The Snapdragon 810 is already released and used, so build the qcom utilities for devices using msm8994. 
Change-Id: I564cb68295099a73fefd24d43e19ca371968ef44 --- core/qcom_utils.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/qcom_utils.mk b/core/qcom_utils.mk index 1177ad438..d1dd262b0 100755 --- a/core/qcom_utils.mk +++ b/core/qcom_utils.mk @@ -8,6 +8,8 @@ QCOM_BOARD_PLATFORMS += msm8916 QCOM_BOARD_PLATFORMS += msm8960 QCOM_BOARD_PLATFORMS += msm8974 QCOM_BOARD_PLATFORMS += mpq8092 +QCOM_BOARD_PLATFORMS += msm8992 +QCOM_BOARD_PLATFORMS += msm8994 QCOM_BOARD_PLATFORMS += msm_bronze QCOM_BOARD_PLATFORMS += apq8084 From 1eb6088098f5aec04e83427d9a8422d990f0d4db Mon Sep 17 00:00:00 2001 From: Marcos Marado Date: Fri, 14 Aug 2015 19:20:18 +0100 Subject: [PATCH 217/309] Adding msm8909 as a QCOM board platform Change-Id: I01c35348b463a769454d7b64e6710a647cabe4f7 --- core/qcom_utils.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/core/qcom_utils.mk b/core/qcom_utils.mk index d1dd262b0..bb1202c49 100755 --- a/core/qcom_utils.mk +++ b/core/qcom_utils.mk @@ -4,6 +4,7 @@ QCOM_BOARD_PLATFORMS += msm7x30 QCOM_BOARD_PLATFORMS += msm8226 QCOM_BOARD_PLATFORMS += msm8610 QCOM_BOARD_PLATFORMS += msm8660 +QCOM_BOARD_PLATFORMS += msm8909 QCOM_BOARD_PLATFORMS += msm8916 QCOM_BOARD_PLATFORMS += msm8960 QCOM_BOARD_PLATFORMS += msm8974 From 677b93d9e2f1ff7d49bc735c6c2ce1e467e1e9a3 Mon Sep 17 00:00:00 2001 From: Rashed Abdel-Tawab Date: Sat, 10 Oct 2015 15:15:34 -0400 Subject: [PATCH 218/309] qcom: Enable TARGET_COMPILE_WITH_MSM_KERNEL In the 6.0 HALs, CAF added the TARGET_COMPILE_WITH_MSM_KERNEL which is enabled when building AOSP with an in-line kernel. 
Since we only use in-line kernel builds, go ahead and enable it across the board for all Qualcomm devices Change-Id: I36bba34b7f1009b0776256cf0e0ce57e6c7377f4 --- core/qcom_target.mk | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 6922b46db..df23a36e6 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -22,6 +22,9 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) TARGET_USES_QCOM_BSP := true TARGET_ENABLE_QC_AV_ENHANCEMENTS := true + # Tell HALs that we're compiling an AOSP build with an in-line kernel + TARGET_COMPILE_WITH_MSM_KERNEL := true + # Enable DirectTrack for legacy targets ifneq ($(filter msm7x30 msm8660 msm8960,$(TARGET_BOARD_PLATFORM)),) ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) From a9c464d34d2358c4c0857ce7a768b2f0a31a48b4 Mon Sep 17 00:00:00 2001 From: Rashed Abdel-Tawab Date: Mon, 12 Oct 2015 21:09:43 -0400 Subject: [PATCH 219/309] build: Cleanup and fix merge derp * Fix VBOOT/VERITY desparity introduced in https://github.com/CyanogenMod/android_build/commit/409f818fccdee9147812b2cb06e703808f55a57a * Add some colors lost in rebase * Fix endif comments Change-Id: Ida979250d3cfeca0c534083c76c08b5d13f08689 --- core/Makefile | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/core/Makefile b/core/Makefile index 857ca852a..48c5c15d7 100644 --- a/core/Makefile +++ b/core/Makefile @@ -549,10 +549,11 @@ bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER) $(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET) $(BOOT_SIGNER) /boot $(INSTALLED_BOOTIMAGE_TARGET) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(INSTALLED_BOOTIMAGE_TARGET) $(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE)) + @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} else ifndef BOARD_CUSTOM_BOOTIMG_MK - ifeq 
(true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY)) # TARGET_BOOTIMAGE_USE_EXT2 != true + ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)) # PRODUCT_SUPPORTS_BOOT_SIGNER != true $(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) $(call pretty,"Target boot image: $@") @@ -567,7 +568,7 @@ bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER) $(VBOOT_SIGNER) $(FUTILITY) $(INSTALLED_BOOTIMAGE_TARGET).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(INSTALLED_BOOTIMAGE_TARGET).keyblock $(INSTALLED_BOOTIMAGE_TARGET) $(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE)) -else # PRODUCT_SUPPORTS_VBOOT != true + else # PRODUCT_SUPPORTS_VBOOT != true $(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(call pretty,"Target boot image: $@") @@ -583,7 +584,7 @@ bootimage-nodeps: $(MKBOOTIMG) @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} endif # PRODUCT_SUPPORTS_VERITY -endif # TARGET_BOOTIMAGE_USE_EXT2 / BOARD_CUSTOM_BOOTIMG_MK +endif # PRODUCT_SUPPORTS_BOOT_SIGNER / BOARD_CUSTOM_BOOTIMG_MK else # TARGET_NO_KERNEL # HACK: The top-level targets depend on the bootimage. Not all targets @@ -933,9 +934,9 @@ define build-recoveryimage-target @echo -e ${CL_CYN}"----- Making recovery image ------"${CL_RST} $(hide) mkdir -p $(TARGET_RECOVERY_OUT) $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/tmp - @echo Copying baseline ramdisk... + @echo -e ${CL_CYN}"Copying baseline ramdisk..."${CL_RST} $(hide) rsync -a $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac. - @echo Modifying ramdisk contents... 
+ @echo -e ${CL_CYN}"Modifying ramdisk contents..."${CL_RST} $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/init*.rc $(hide) cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/ $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/sepolicy From d1cbb35c74558fd95ba5667d7aa42a0f3f91f885 Mon Sep 17 00:00:00 2001 From: Rashed Abdel-Tawab Date: Fri, 9 Oct 2015 16:36:40 -0400 Subject: [PATCH 220/309] qcom: Add QTI_BSP Change-Id: Id99fb17044e453af050e6e3ff59cd3f5a47da268 --- core/qcom_target.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index df23a36e6..38303e978 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -18,6 +18,7 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) qcom_flags := -DQCOM_HARDWARE qcom_flags += -DQCOM_BSP + qcom_flags += -DQTI_BSP TARGET_USES_QCOM_BSP := true TARGET_ENABLE_QC_AV_ENHANCEMENTS := true From 7cb2a03029915bfb54aec684ecc77ef2371a2996 Mon Sep 17 00:00:00 2001 From: Adnan Begovic Date: Tue, 26 May 2015 18:21:15 -0700 Subject: [PATCH 221/309] build/core: Inherit vendor/* makefiles for check-api. Change-Id: I72d409075192b18c35068566c5687092d45be9b4 Conflicts: core/config.mk --- core/clear_vars.mk | 3 ++- core/config.mk | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/core/clear_vars.mk b/core/clear_vars.mk index 7847ae927..794e1a918 100644 --- a/core/clear_vars.mk +++ b/core/clear_vars.mk @@ -293,7 +293,8 @@ LOCAL_MODULE_STEM_64:= LOCAL_CLANG_32:= LOCAL_CLANG_64:= -LOCAL_IGNORE_SUBDIR:= +# Include any vendor specific clear_vars.mk file +-include $(TOPDIR)vendor/*/build/core/clear_vars.mk # Trim MAKEFILE_LIST so that $(call my-dir) doesn't need to # iterate over thousands of entries every time. 
diff --git a/core/config.mk b/core/config.mk index b95e974cd..766be1dcc 100644 --- a/core/config.mk +++ b/core/config.mk @@ -709,6 +709,13 @@ ifneq ($(SLIM_BUILD),) ## We need to be sure the global selinux policies are included ## last, to avoid accidental resetting by device configs $(eval include vendor/slim/sepolicy/sepolicy.mk) + +# Include any vendor specific config.mk file +-include $(TOPDIR)vendor/*/build/core/config.mk + +# Include any vendor specific apicheck.mk file +-include $(TOPDIR)vendor/*/build/core/apicheck.mk + endif include $(BUILD_SYSTEM)/dumpvar.mk From 987ce92ac7dd1b57fba8c37c1a162cbeb4c32137 Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Tue, 13 Oct 2015 13:22:43 -0700 Subject: [PATCH 222/309] qcom: Remove QCOM_DIRECTTRACK * DirectTrack/LPA/tunnel for 8960 only works with AwesomePlayer, which has been deprecated. Don't bother allowing this to compile. Change-Id: I4d3e6dd9f1e3047a379fd76af4f6b45d791210de --- core/qcom_target.mk | 4 ---- 1 file changed, 4 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 38303e978..17c93354e 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -26,11 +26,7 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) # Tell HALs that we're compiling an AOSP build with an in-line kernel TARGET_COMPILE_WITH_MSM_KERNEL := true - # Enable DirectTrack for legacy targets ifneq ($(filter msm7x30 msm8660 msm8960,$(TARGET_BOARD_PLATFORM)),) - ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true) - qcom_flags += -DQCOM_DIRECTTRACK - endif # Enable legacy graphics functions qcom_flags += -DQCOM_BSP_LEGACY endif From a8414ca304b110bd3a2b94f9bb36ad88682b1547 Mon Sep 17 00:00:00 2001 From: Dan Pasanen Date: Thu, 15 Oct 2015 09:10:50 -0500 Subject: [PATCH 223/309] releasetools: don't attempt to read fingerprint on unified devices * You wont find this in the build.prop on these devices and this is how we handled them in previous versions Change-Id: I56332c87916da1a1206980df0e7a6ca8a55f0e8e --- 
tools/releasetools/ota_from_target_files.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py index c8fa8553d..f7a0212a4 100755 --- a/tools/releasetools/ota_from_target_files.py +++ b/tools/releasetools/ota_from_target_files.py @@ -457,6 +457,8 @@ def GetOemProperty(name, oem_props, oem_dict, info_dict): return GetBuildProp(name, info_dict) def CalculateFingerprint(oem_props, oem_dict, info_dict): + if OPTIONS.override_prop: + return GetBuildProp("ro.build.date.utc", info_dict) if oem_props is None: return GetBuildProp("ro.build.fingerprint", info_dict) return "%s/%s/%s:%s" % ( @@ -828,10 +830,11 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip): # devices with thumbprints are all using file-based OTAs. Long term we # should factor out the common parts into a shared one to avoid further # divergence. - source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict) - target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict) - metadata["pre-build"] = source_fp - metadata["post-build"] = target_fp + if not OPTIONS.override_prop: + source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict) + target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict) + metadata["pre-build"] = source_fp + metadata["post-build"] = target_fp source_boot = common.GetBootableImage( "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", From 1d625dd33ae7b38af79b13bc888abe6e518b1827 Mon Sep 17 00:00:00 2001 From: dankoman Date: Sun, 22 Jun 2014 22:26:43 +0200 Subject: [PATCH 224/309] build: Add ASCII logo to edify script Less boring, don't you think? 
_____________________ / www.slimroms.eu | / | / ___________________| / / ___ / / ___/ \ / / / \___/____ ____ / / | |___/ \_/ | ___________/ / | | | | | / | | | | | | | / | | | | | | |_____________/ \___^___^___^___^___/ oldChange-Id: I8bbd68e5be18753fde34cd76f24038f5d0d3a190 Conflicts: tools/releasetools/ota_from_target_files Change-Id: If241a0e91ecc34fc8ed5386dd7586aebbc3f1777 --- tools/releasetools/ota_from_target_files.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py index f7a0212a4..06ec700ae 100755 --- a/tools/releasetools/ota_from_target_files.py +++ b/tools/releasetools/ota_from_target_files.py @@ -626,6 +626,20 @@ def WriteFullOTAPackage(input_zip, output_zip): system_progress = 0.75 + script.Print(" _____________________ ") + script.Print(" / www.slimroms.org |") + script.Print(" / |") + script.Print(" / ___________________|") + script.Print(" / / ___ ") + script.Print(" / / ___/ \ ") + script.Print(" / / / \___/____ ____ ") + script.Print(" / / | |___/ \_/ |") + script.Print(" ___________/ / | | | |") + script.Print("| / | | | | | |") + script.Print("| / | | | | | |") + script.Print("|_____________/ \___^___^___^___^___/") + script.Print(" ") + if OPTIONS.wipe_user_data: system_progress -= 0.1 if HasVendorPartition(input_zip): From 71afec188601161408a61bbca45fedaf7a06b919 Mon Sep 17 00:00:00 2001 From: Chirayu Desai Date: Tue, 30 Apr 2013 17:08:17 +0530 Subject: [PATCH 225/309] roomservice: python3 support Change-Id: I7621818ba7ed997676728fe865f37a25b3a5b8b5 Signed-off-by: Chirayu Desai --- tools/roomservice.py | 86 ++++++++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 35 deletions(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index b8b505f83..4c186ee1f 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -13,12 +13,29 @@ # See the License for the specific language governing permissions and # 
limitations under the License. -import os -import sys -import urllib2 +from __future__ import print_function + +import base64 import json +import netrc +import os import re -import netrc, base64 +import sys +try: + # For python3 + import urllib.error + import urllib.parse + import urllib.request +except ImportError: + # For python2 + import imp + import urllib2 + import urlparse + urllib = imp.new_module('urllib') + urllib.error = urllib2 + urllib.parse = urlparse + urllib.request = urllib2 + from xml.etree import ElementTree product = sys.argv[1]; @@ -34,7 +51,7 @@ device = product if not depsonly: - print "Device %s not found. Attempting to retrieve device repository from SlimRoms Github (http://github.com/SlimRoms)." % device + print("Device %s not found. Attempting to retrieve device repository from SlimRoms Github (http://github.com/SlimRoms)." % device) repositories = [] @@ -54,9 +71,9 @@ def add_auth(githubreq): page = 1 while not depsonly: - githubreq = urllib2.Request("https://api.github.com/users/SlimRoms/repos?per_page=200&page=%d" % page) + githubreq = urllib.request.Request("https://api.github.com/users/SlimRoms/repos?per_page=200&page=%d" % page) add_auth(githubreq) - result = json.loads(urllib2.urlopen(githubreq).read()) + result = json.loads(urllib.request.urlopen(githubreq).read().decode()) if len(result) == 0: break for res in result: @@ -153,25 +170,25 @@ def add_to_manifest(repositories, fallback_branch = None): repo_name = repository['repository'] repo_target = repository['target_path'] if exists_in_tree(lm, repo_name): - print 'SlimRoms/%s already exists' % (repo_name) + print('SlimRoms/%s already exists' % (repo_name)) continue - print 'Adding dependency: SlimRoms/%s -> %s' % (repo_name, repo_target) + print('Adding dependency: SlimRoms/%s -> %s' % (repo_name, repo_target)) project = ElementTree.Element("project", attrib = { "path": repo_target, "remote": "github", "name": "SlimRoms/%s" % repo_name }) if 'branch' in repository: 
project.set('revision',repository['branch']) elif fallback_branch: - print "Using fallback branch %s for %s" % (fallback_branch, repo_name) + print("Using fallback branch %s for %s" % (fallback_branch, repo_name)) project.set('revision', fallback_branch) else: - print "Using default branch for %s" % repo_name + print("Using default branch for %s" % repo_name) lm.append(project) indent(lm, 0) - raw_xml = ElementTree.tostring(lm) + raw_xml = ElementTree.tostring(lm).decode() raw_xml = '\n' + raw_xml f = open('.repo/local_manifests/slim_manifest.xml', 'w') @@ -179,7 +196,7 @@ def add_to_manifest(repositories, fallback_branch = None): f.close() def fetch_dependencies(repo_path, fallback_branch = None): - print 'Looking for dependencies' + print('Looking for dependencies') dependencies_path = repo_path + '/slim.dependencies' syncable_repos = [] @@ -196,13 +213,13 @@ def fetch_dependencies(repo_path, fallback_branch = None): dependencies_file.close() if len(fetch_list) > 0: - print 'Adding dependencies to manifest' + print('Adding dependencies to manifest') add_to_manifest(fetch_list, fallback_branch) else: - print 'Dependencies file not found, bailing out.' + print('Dependencies file not found, bailing out.') if len(syncable_repos) > 0: - print 'Syncing dependencies' + print('Syncing dependencies') os.system('repo sync %s' % ' '.join(syncable_repos)) for deprepo in syncable_repos: @@ -216,7 +233,7 @@ def has_branch(branches, revision): if repo_path: fetch_dependencies(repo_path) else: - print "Trying dependencies-only mode on a non-existing device tree?" 
+ print("Trying dependencies-only mode on a non-existing device tree?") sys.exit() @@ -224,22 +241,22 @@ def has_branch(branches, revision): for repository in repositories: repo_name = repository['name'] if repo_name.startswith("android_device_") and repo_name.endswith("_" + device): - print "Found repository: %s" % repository['name'] + print("Found repository: %s" % repository['name']) manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "") default_revision = get_default_revision() - print "Default revision: %s" % default_revision - print "Checking branch info" - githubreq = urllib2.Request(repository['branches_url'].replace('{/branch}', '')) + print("Default revision: %s" % default_revision) + print("Checking branch info") + githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', '')) add_auth(githubreq) - result = json.loads(urllib2.urlopen(githubreq).read()) + result = json.loads(urllib.request.urlopen(githubreq).read().decode()) ## Try tags, too, since that's what releases use if not has_branch(result, default_revision): - githubreq = urllib2.Request(repository['tags_url'].replace('{/tag}', '')) + githubreq = urllib.request.Request(repository['tags_url'].replace('{/tag}', '')) add_auth(githubreq) - result.extend (json.loads(urllib2.urlopen(githubreq).read())) + result.extend (json.loads(urllib.request.urlopen(githubreq).read().decode())) repo_path = "device/%s/%s" % (manufacturer, device) adding = {'repository':repo_name,'target_path':repo_path} @@ -247,30 +264,29 @@ def has_branch(branches, revision): fallback_branch = None if not has_branch(result, default_revision): if os.getenv('ROOMSERVICE_BRANCHES'): - fallbacks = filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' ')) + fallbacks = list(filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' '))) for fallback in fallbacks: if has_branch(result, fallback): - print "Using fallback branch: %s" % fallback + print("Using fallback branch: %s" % fallback) 
fallback_branch = fallback break if not fallback_branch: - print "Default revision %s not found in %s. Bailing." % (default_revision, repo_name) - print "Branches found:" + print("Default revision %s not found in %s. Bailing." % (default_revision, repo_name)) + print("Branches found:") for branch in [branch['name'] for branch in result]: - print branch - print "Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches." + print(branch) + print("Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches.") sys.exit() add_to_manifest([adding], fallback_branch) - print "Syncing repository to retrieve project." + print("Syncing repository to retrieve project.") os.system('repo sync %s' % repo_path) - print "Repository synced!" + print("Repository synced!") fetch_dependencies(repo_path, fallback_branch) - print "Done" + print("Done") sys.exit() -print "Repository for %s not found in the SlimRoms Github repository list. If this is in error, you may need to manually add it to your local_manifests/slim_manifest.xml." % device - +print("Repository for %s not found in the SlimRoms Github repository list. If this is in error, you may need to manually add it to your local_manifests/slim_manifest.xml." % device) From 0ba081be4b023cf42c2b82801d9733b496189833 Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Fri, 31 Oct 2014 21:02:37 -0500 Subject: [PATCH 226/309] roomservice: Improve new device retrieval Without credentials, GitHub's search API limits requests to 60/hr. The existing method to add a new device is to fetch JSON-formatted info for ALL CM repositories and then search for the device. In doing so, more than 10 pages of results are returned (i.e. more than 10 requests per device). This is clumsy, slow, and limits use of roomservice to only ~5 devices per hour. Instead, only return search results for repositories that have the device name in the repository name. Then, one device = one request. 
It's faster and allows closer to 60 device setups / hr. Additional bailouts are included to stop the script earlier than later if a device is not found. Change-Id: I7f914d7ede82da0f100d9fd6cf8b603177962e48 --- tools/roomservice.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index 4c186ee1f..bc2991882 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -69,16 +69,20 @@ def add_auth(githubreq): if githubauth: githubreq.add_header("Authorization","Basic %s" % githubauth) -page = 1 -while not depsonly: - githubreq = urllib.request.Request("https://api.github.com/users/SlimRoms/repos?per_page=200&page=%d" % page) +if not depsonly: + githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:SlimRoms+in:name" % device) add_auth(githubreq) result = json.loads(urllib.request.urlopen(githubreq).read().decode()) - if len(result) == 0: - break - for res in result: + try: + numresults = int(result['total_count']) + except: + print("Failed to search GitHub (offline?)") + sys.exit() + if (numresults == 0): + print("Could not find device %s on github.com/CyanogenMod" % device) + sys.exit() + for res in result['items']: repositories.append(res) - page = page + 1 local_manifests = r'.repo/local_manifests' if not os.path.exists(local_manifests): os.makedirs(local_manifests) From e3777c9939412a5ea5a20cb06fa4f0e7ae1c2597 Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Fri, 2 Jan 2015 00:08:48 -0600 Subject: [PATCH 227/309] roomservice: Fix search of devices forked to SLIM Commit "roomservice: Improve new device retrieval" introduced a regression where repositories that were forked to SLIM were omitted from search results. This fixes that issue. 
Change-Id: I7bf54129b5da1749abe5b2b9a492cb93e6ee41a6 --- tools/roomservice.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index bc2991882..dfdbc5cd4 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -70,7 +70,7 @@ def add_auth(githubreq): githubreq.add_header("Authorization","Basic %s" % githubauth) if not depsonly: - githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:SlimRoms+in:name" % device) + githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:SlimRoms+in:name+fork:true" % device) add_auth(githubreq) result = json.loads(urllib.request.urlopen(githubreq).read().decode()) try: From 8a08f5e37aa358fdddac60a3c9a8bd05c50792af Mon Sep 17 00:00:00 2001 From: Anthony King Date: Thu, 8 Jan 2015 11:39:12 -0600 Subject: [PATCH 228/309] roomservice: Improve error handling on search Use the relevant forms of 'except' for urllib and parsing instead of a general except which was performed too late anyways. 
Change-Id: Ia1fc89dd5a8a703fc0175aef7b6dd013a44a2c8e --- tools/roomservice.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index dfdbc5cd4..e7319d177 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -72,16 +72,15 @@ def add_auth(githubreq): if not depsonly: githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:SlimRoms+in:name+fork:true" % device) add_auth(githubreq) - result = json.loads(urllib.request.urlopen(githubreq).read().decode()) try: - numresults = int(result['total_count']) - except: - print("Failed to search GitHub (offline?)") + result = json.loads(urllib.request.urlopen(githubreq).read().decode()) + except urllib.error.URLError: + print("Failed to search GitHub") sys.exit() - if (numresults == 0): - print("Could not find device %s on github.com/CyanogenMod" % device) + except ValueError: + print("Failed to parse return data from GitHub") sys.exit() - for res in result['items']: + for res in result.get('items', []): repositories.append(res) local_manifests = r'.repo/local_manifests' From cdcdd8b836ba0d2f4b002b23fa2f67fd2ca13b3e Mon Sep 17 00:00:00 2001 From: Nicholas Flintham Date: Wed, 1 Apr 2015 14:16:24 +0100 Subject: [PATCH 229/309] Fixup zip naming and roomservice Change-Id: Ibc77861c068edefc35d6925d21f320c187bacd0b Conflicts: core/Makefile --- core/Makefile | 2 +- envsetup.sh | 2 +- tools/roomservice.py | 89 ++++++++++++++++++++++++++++++++++++-------- 3 files changed, 75 insertions(+), 18 deletions(-) diff --git a/core/Makefile b/core/Makefile index 48c5c15d7..e17312495 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1842,7 +1842,7 @@ $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) $(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \ $(BUILT_TARGET_FILES_PACKAGE) $@ -SLIM_TARGET_PACKAGE := $(PRODUCT_OUT)/Slim-$(SLIM_VERSION).zip +SLIM_TARGET_PACKAGE := $(PRODUCT_OUT)/$(SLIM_MOD_VERSION).zip 
.PHONY: otapackage bacon otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) diff --git a/envsetup.sh b/envsetup.sh index afe1bacf7..c27ea51d6 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -739,7 +739,7 @@ function eat() { if [ "$OUT" ] ; then MODVERSION=$(get_build_var SLIM_VERSION) - ZIPFILE=slim-$MODVERSION.zip + ZIPFILE=$MODVERSION.zip ZIPPATH=$OUT/$ZIPFILE if [ ! -f $ZIPPATH ] ; then echo "Nothing to eat" diff --git a/tools/roomservice.py b/tools/roomservice.py index e7319d177..b3e442fb4 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -108,12 +108,48 @@ def indent(elem, level=0): if level and (not elem.tail or not elem.tail.strip()): elem.tail = i -def get_default_revision(): - m = ElementTree.parse(".repo/manifest.xml") + +def get_default(manifest=None): + m = manifest or ElementTree.parse(".repo/manifest.xml") d = m.findall('default')[0] - r = d.get('revision') + return d + + +def get_default_revision(manifest=None): + m = manifest or ElementTree.parse(".repo/manifest.xml") + r = get_default(manifest=m).get('revision') return r.replace('refs/heads/', '').replace('refs/tags/', '') + +def get_remote(manifest=None, remote_name=None): + m = manifest or ElementTree.parse(".repo/manifest.xml") + if not remote_name: + remote_name = get_default(manifest=m).get('remote') + remotes = m.findall('remote') + for remote in remotes: + if remote_name == remote.get('name'): + return remote + + +def get_revision(manifest=None, p="android_build"): + m = manifest or ElementTree.parse(".repo/manifest.xml") + project = None + for proj in m.findall("project"): + if re.search(r"%s$" % p, proj.get("name")): + project = proj + break + if project is not None: + return get_default_revision(manifest=m) + revision = project.get('revision') + if revision: + return revision.replace('refs/heads/', '').replace('refs/tags/', '') + remote = get_remote(manifest=m, remote_name=project.get('remote')) + revision = remote.get('revision') + if not revision: + return 
get_default_revision(manifest=None) + return revision.replace('refs/heads/', '').replace('refs/tags/', '') + + def get_from_manifest(devicename): try: lm = ElementTree.parse(".repo/local_manifests/slim_manifest.xml") @@ -122,7 +158,7 @@ def get_from_manifest(devicename): lm = ElementTree.Element("manifest") for localpath in lm.findall("project"): - if re.search("android_device_.*_%s$" % device, localpath.get("name")): + if re.search("device_.*_%s$" % device, localpath.get("name")): return localpath.get("path") # Devices originally from AOSP are in the main manifest... @@ -133,7 +169,7 @@ def get_from_manifest(devicename): mm = ElementTree.Element("manifest") for localpath in mm.findall("project"): - if re.search("android_device_.*_%s$" % device, localpath.get("name")): + if re.search("device_.*_%s$" % device, localpath.get("name")): return localpath.get("path") return None @@ -173,21 +209,29 @@ def add_to_manifest(repositories, fallback_branch = None): repo_name = repository['repository'] repo_target = repository['target_path'] if exists_in_tree(lm, repo_name): - print('SlimRoms/%s already exists' % (repo_name)) + print('%s already exists' % (repo_name)) continue - print('Adding dependency: SlimRoms/%s -> %s' % (repo_name, repo_target)) - project = ElementTree.Element("project", attrib = { "path": repo_target, - "remote": "github", "name": "SlimRoms/%s" % repo_name }) + if "/" not in repo_name: + repo_name = "%s/%s" % ("SlimRoms", repo_name) + + print('Adding dependency: %s -> %s' % (repo_name, repo_target)) + + project = ElementTree.Element( + "project", + attrib={"path": repo_target, + "remote": "github", + "name": "%s" % repo_name} + ) if 'branch' in repository: - project.set('revision',repository['branch']) + project.set('revision', repository['branch']) elif fallback_branch: - print("Using fallback branch %s for %s" % (fallback_branch, repo_name)) + print("Using fallback branch %s for %s" % + (fallback_branch, repo_name)) project.set('revision', 
fallback_branch) else: print("Using default branch for %s" % repo_name) - lm.append(project) indent(lm, 0) @@ -198,7 +242,15 @@ def add_to_manifest(repositories, fallback_branch = None): f.write(raw_xml) f.close() +_fetch_dep_cache = [] + + def fetch_dependencies(repo_path, fallback_branch = None): + global _fetch_dep_cache + if repo_path in _fetch_dep_cache: + return + _fetch_dep_cache.append(repo_path) + print('Looking for dependencies') dependencies_path = repo_path + '/slim.dependencies' syncable_repos = [] @@ -209,7 +261,11 @@ def fetch_dependencies(repo_path, fallback_branch = None): fetch_list = [] for dependency in dependencies: - if not is_in_manifest("SlimRoms/%s" % dependency['repository']): + repo_name = dependency['repository'] + if "/" not in repo_name: + repo_name = "%s/%s" % ("SlimRoms", repo_name) + + if not is_in_manifest(repo_name): fetch_list.append(dependency) syncable_repos.append(dependency['target_path']) @@ -243,12 +299,12 @@ def has_branch(branches, revision): else: for repository in repositories: repo_name = repository['name'] - if repo_name.startswith("android_device_") and repo_name.endswith("_" + device): + if repo_name.startswith("device_") and repo_name.endswith("_" + device): print("Found repository: %s" % repository['name']) - manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "") + manufacturer = repo_name.replace("device_", "").replace("_" + device, "") - default_revision = get_default_revision() + default_revision = get_revision() print("Default revision: %s" % default_revision) print("Checking branch info") githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', '')) @@ -293,3 +349,4 @@ def has_branch(branches, revision): sys.exit() print("Repository for %s not found in the SlimRoms Github repository list. If this is in error, you may need to manually add it to your local_manifests/slim_manifest.xml." 
% device) + From 6595d4b0625c2245ea698bab4ad11f9cf9e41b5a Mon Sep 17 00:00:00 2001 From: Anthony King Date: Wed, 29 Apr 2015 15:06:31 +0100 Subject: [PATCH 230/309] roomservice: hack the calculated revision in to shape Change-Id: Ibccc93a6d899e7c7f2dad2f493a3ca1fc0a2815a --- tools/roomservice.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index b3e442fb4..5ccbb9557 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -138,7 +138,7 @@ def get_revision(manifest=None, p="android_build"): if re.search(r"%s$" % p, proj.get("name")): project = proj break - if project is not None: + if project is None: return get_default_revision(manifest=m) revision = project.get('revision') if revision: @@ -304,7 +304,8 @@ def has_branch(branches, revision): manufacturer = repo_name.replace("device_", "").replace("_" + device, "") - default_revision = get_revision() + calc_revision = get_revision() + default_revision = get_default_revision() print("Default revision: %s" % default_revision) print("Checking branch info") githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', '')) @@ -312,16 +313,18 @@ def has_branch(branches, revision): result = json.loads(urllib.request.urlopen(githubreq).read().decode()) ## Try tags, too, since that's what releases use - if not has_branch(result, default_revision): + if not (has_branch(result, calc_revision) or has_branch(result, default_revision)): githubreq = urllib.request.Request(repository['tags_url'].replace('{/tag}', '')) add_auth(githubreq) - result.extend (json.loads(urllib.request.urlopen(githubreq).read().decode())) + result.extend(json.loads(urllib.request.urlopen(githubreq).read().decode())) repo_path = "device/%s/%s" % (manufacturer, device) adding = {'repository':repo_name,'target_path':repo_path} fallback_branch = None - if not has_branch(result, default_revision): + if calc_revision != default_revision and 
has_branch(result, calc_revision): + fallback_branch = calc_revision + if not fallback_branch and not has_branch(result, default_revision): if os.getenv('ROOMSERVICE_BRANCHES'): fallbacks = list(filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' '))) for fallback in fallbacks: From 6883f9b02deca0597eb3b2a4b54422c70688fd2b Mon Sep 17 00:00:00 2001 From: Anthony King Date: Fri, 9 Jan 2015 18:04:22 +0000 Subject: [PATCH 231/309] Overhaul roomservice for sanity and cleanliness I wish anyone porting this over to other versions of roomservice luck Change-Id: I372d254469f5b3306e0a5e370d26d22ed6e49eaa --- tools/roomservice.py | 451 ++++++++++++++++++++++--------------------- 1 file changed, 235 insertions(+), 216 deletions(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index 5ccbb9557..cde9bea04 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -1,5 +1,6 @@ #!/usr/bin/env python -# Copyright (C) 2012-2013, The SlimRoms Project +# Copyright (C) 2012-2013, The CyanogenMod Project +# Copyright (C) 2012-2015, SlimRoms Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,82 +20,68 @@ import json import netrc import os -import re import sys -try: - # For python3 - import urllib.error - import urllib.parse - import urllib.request -except ImportError: - # For python2 - import imp - import urllib2 - import urlparse - urllib = imp.new_module('urllib') - urllib.error = urllib2 - urllib.parse = urlparse - urllib.request = urllib2 from xml.etree import ElementTree -product = sys.argv[1]; - -if len(sys.argv) > 2: - depsonly = sys.argv[2] -else: - depsonly = None - try: - device = product[product.index("_") + 1:] -except: - device = product - -if not depsonly: - print("Device %s not found. Attempting to retrieve device repository from SlimRoms Github (http://github.com/SlimRoms)." 
% device) - -repositories = [] - -try: - authtuple = netrc.netrc().authenticators("api.github.com") - - if authtuple: - githubauth = base64.encodestring('%s:%s' % (authtuple[0], authtuple[2])).replace('\n', '') - else: - githubauth = None -except: - githubauth = None - -def add_auth(githubreq): - if githubauth: - githubreq.add_header("Authorization","Basic %s" % githubauth) - -if not depsonly: - githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:SlimRoms+in:name+fork:true" % device) - add_auth(githubreq) - try: - result = json.loads(urllib.request.urlopen(githubreq).read().decode()) - except urllib.error.URLError: - print("Failed to search GitHub") - sys.exit() - except ValueError: - print("Failed to parse return data from GitHub") - sys.exit() - for res in result.get('items', []): - repositories.append(res) + # For python3 + import urllib.error + import urllib.parse + import urllib.request +except ImportError: + # For python2 + import imp + import urllib2 + import urlparse + urllib = imp.new_module('urllib') + urllib.error = urllib2 + urllib.parse = urlparse + urllib.request = urllib2 + +default_manifest = ".repo/manifest.xml" + +custom_local_manifest = ".repo/local_manifests/slim_manifest.xml" +custom_default_revision = "lp5.0" +custom_dependencies = "slim.dependencies" +org_manifest = "SlimRoms" # leave empty if org is provided in manifest +org_display = "SlimRoms" # needed for displaying + +github_auth = None + + +local_manifests = '.repo/local_manifests' +if not os.path.exists(local_manifests): + os.makedirs(local_manifests) + + +def add_auth(g_req): + global github_auth + if github_auth is None: + try: + auth = netrc.netrc().authenticators("api.github.com") + except (netrc.NetrcParseError, IOError): + auth = None + if auth: + github_auth = base64.b64encode( + ('%s:%s' % (auth[0], auth[2])).encode() + ) + else: + github_auth = "" + if github_auth: + g_req.add_header("Authorization", "Basic %s" % github_auth) 
-local_manifests = r'.repo/local_manifests' -if not os.path.exists(local_manifests): os.makedirs(local_manifests) -def exists_in_tree(lm, repository): +def exists_in_tree(lm, repo): for child in lm.getchildren(): - if child.attrib['name'].endswith(repository): + if child.attrib['name'].endswith(repo): return True return False -# in-place prettyprint formatter + def indent(elem, level=0): - i = "\n" + level*" " + # in-place prettyprint formatter + i = "\n" + " " * level if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " @@ -109,20 +96,27 @@ def indent(elem, level=0): elem.tail = i +def load_manifest(manifest): + try: + man = ElementTree.parse(manifest).getroot() + except (IOError, ElementTree.ParseError): + man = ElementTree.Element("manifest") + return man + + def get_default(manifest=None): - m = manifest or ElementTree.parse(".repo/manifest.xml") + m = manifest or load_manifest(default_manifest) d = m.findall('default')[0] return d def get_default_revision(manifest=None): - m = manifest or ElementTree.parse(".repo/manifest.xml") - r = get_default(manifest=m).get('revision') + r = get_default(manifest=manifest).get('revision') return r.replace('refs/heads/', '').replace('refs/tags/', '') def get_remote(manifest=None, remote_name=None): - m = manifest or ElementTree.parse(".repo/manifest.xml") + m = manifest or load_manifest(default_manifest) if not remote_name: remote_name = get_default(manifest=m).get('remote') remotes = m.findall('remote') @@ -132,10 +126,10 @@ def get_remote(manifest=None, remote_name=None): def get_revision(manifest=None, p="android_build"): - m = manifest or ElementTree.parse(".repo/manifest.xml") + m = manifest or load_manifest(default_manifest) project = None - for proj in m.findall("project"): - if re.search(r"%s$" % p, proj.get("name")): + for proj in m.findall('project'): + if proj.get('name').endswith(p): project = proj break if project is None: @@ -150,70 +144,37 @@ def get_revision(manifest=None, 
p="android_build"): return revision.replace('refs/heads/', '').replace('refs/tags/', '') -def get_from_manifest(devicename): - try: - lm = ElementTree.parse(".repo/local_manifests/slim_manifest.xml") - lm = lm.getroot() - except: - lm = ElementTree.Element("manifest") - - for localpath in lm.findall("project"): - if re.search("device_.*_%s$" % device, localpath.get("name")): - return localpath.get("path") - - # Devices originally from AOSP are in the main manifest... - try: - mm = ElementTree.parse(".repo/manifest.xml") - mm = mm.getroot() - except: - mm = ElementTree.Element("manifest") - - for localpath in mm.findall("project"): - if re.search("device_.*_%s$" % device, localpath.get("name")): - return localpath.get("path") - +def get_from_manifest(device_name): + for man in (custom_local_manifest, default_manifest): + man = load_manifest(man) + for local_path in man.findall("project"): + lp = local_path.get("name") + if lp.startswith("device_") and lp.endswith("_" + device_name): + return local_path.get("path") return None -def is_in_manifest(projectname): - try: - lm = ElementTree.parse(".repo/local_manifests/slim_manifest.xml") - lm = lm.getroot() - except: - lm = ElementTree.Element("manifest") - - for localpath in lm.findall("project"): - if localpath.get("name") == projectname: - return 1 - - ## Search in main manifest, too - try: - lm = ElementTree.parse(".repo/manifest.xml") - lm = lm.getroot() - except: - lm = ElementTree.Element("manifest") - for localpath in lm.findall("project"): - if localpath.get("name") == projectname: - return 1 +def is_in_manifest(project_name): + for man in (custom_local_manifest, default_manifest): + man = load_manifest(man) + for local_path in man.findall("project"): + if local_path.get("name") == project_name: + return True + return False - return None -def add_to_manifest(repositories, fallback_branch = None): - try: - lm = ElementTree.parse(".repo/local_manifests/slim_manifest.xml") - lm = lm.getroot() - except: - lm = 
ElementTree.Element("manifest") +def add_to_manifest(repos, fallback_branch=None): + lm = load_manifest(custom_local_manifest) - for repository in repositories: - repo_name = repository['repository'] - repo_target = repository['target_path'] + for repo in repos: + repo_name = repo['repository'] + repo_target = repo['target_path'] if exists_in_tree(lm, repo_name): - print('%s already exists' % (repo_name)) + print('%s already exists' % repo_name) continue if "/" not in repo_name: - repo_name = "%s/%s" % ("SlimRoms", repo_name) + repo_name = os.path.join(org_manifest, repo_name) print('Adding dependency: %s -> %s' % (repo_name, repo_target)) @@ -224,132 +185,190 @@ def add_to_manifest(repositories, fallback_branch = None): "name": "%s" % repo_name} ) - if 'branch' in repository: - project.set('revision', repository['branch']) + if 'branch' in repo: + project.set('revision', repo['branch']) elif fallback_branch: - print("Using fallback branch %s for %s" % + print("Using branch %s for %s" % (fallback_branch, repo_name)) project.set('revision', fallback_branch) else: print("Using default branch for %s" % repo_name) lm.append(project) - indent(lm, 0) - raw_xml = ElementTree.tostring(lm).decode() - raw_xml = '\n' + raw_xml + indent(lm) + raw_xml = "\n".join(('', + ElementTree.tostring(lm).decode())) - f = open('.repo/local_manifests/slim_manifest.xml', 'w') + f = open(custom_local_manifest, 'w') f.write(raw_xml) f.close() _fetch_dep_cache = [] -def fetch_dependencies(repo_path, fallback_branch = None): +def fetch_dependencies(repo_path, fallback_branch=None): global _fetch_dep_cache if repo_path in _fetch_dep_cache: return _fetch_dep_cache.append(repo_path) print('Looking for dependencies') - dependencies_path = repo_path + '/slim.dependencies' - syncable_repos = [] - if os.path.exists(dependencies_path): - dependencies_file = open(dependencies_path, 'r') - dependencies = json.loads(dependencies_file.read()) - fetch_list = [] - - for dependency in dependencies: - 
repo_name = dependency['repository'] - if "/" not in repo_name: - repo_name = "%s/%s" % ("SlimRoms", repo_name) + dep_p = '/'.join((repo_path, custom_dependencies)) + if os.path.exists(dep_p): + with open(dep_p) as dep_f: + dependencies = json.load(dep_f) + else: + dependencies = {} + print('Dependencies file not found, bailing out.') - if not is_in_manifest(repo_name): - fetch_list.append(dependency) - syncable_repos.append(dependency['target_path']) + fetch_list = [] + syncable_repos = [] - dependencies_file.close() + for dependency in dependencies: + repo_name = dependency['repository'] + if '/' not in repo_name: + repo_name = os.path.join(org_manifest, repo_name) + if not is_in_manifest(repo_name): + fetch_list.append(dependency) + syncable_repos.append(dependency['target_path']) - if len(fetch_list) > 0: - print('Adding dependencies to manifest') - add_to_manifest(fetch_list, fallback_branch) - else: - print('Dependencies file not found, bailing out.') + if fetch_list: + print('Adding dependencies to manifest') + add_to_manifest(fetch_list, fallback_branch) - if len(syncable_repos) > 0: + if syncable_repos: print('Syncing dependencies') os.system('repo sync %s' % ' '.join(syncable_repos)) for deprepo in syncable_repos: fetch_dependencies(deprepo) + def has_branch(branches, revision): - return revision in [branch['name'] for branch in branches] + return revision in (branch['name'] for branch in branches) -if depsonly: - repo_path = get_from_manifest(device) - if repo_path: - fetch_dependencies(repo_path) - else: - print("Trying dependencies-only mode on a non-existing device tree?") +def detect_revision(repo): + """ + returns None if using the default revision, else return + the branch name if using a different revision + """ + print("Checking branch info") + githubreq = urllib.request.Request( + repo['branches_url'].replace('{/branch}', '')) + add_auth(githubreq) + result = json.loads(urllib.request.urlopen(githubreq).read().decode()) + + calc_revision = 
get_revision() + default_revision = get_default_revision() + print("Calculated revision: %s" % calc_revision) + print("Default revision: %s" % default_revision) + + + if has_branch(result, calc_revision): + return calc_revision + if has_branch(result, default_revision): + return None + + # Try tags, too, since that's what releases use + githubreq = urllib.request.Request( + repo['tags_url'].replace('{/tag}', '')) + add_auth(githubreq) + result.extend(json.loads( + urllib.request.urlopen(githubreq).read().decode())) + + if has_branch(result, calc_revision): + return calc_revision + if has_branch(result, default_revision): + return None + + fallbacks = os.getenv('ROOMSERVICE_BRANCHES', '').split() + for fallback in fallbacks: + if has_branch(result, fallback): + print("Using fallback branch: %s" % fallback) + return fallback + + if has_branch(result, custom_default_revision): + print("Falling back to custom revision: %s" + % custom_default_revision) + return custom_default_revision + + print("Default revision %s not found in %s. Bailing." % + (default_revision, repo['name'])) + print("Branches found:") + for branch in result: + print(branch['name']) + print("Use the ROOMSERVICE_BRANCHES environment variable to " + "specify a list of fallback branches.") sys.exit() -else: + +def main(): + try: + depsonly = bool(sys.argv[2]) + except IndexError: + depsonly = False + + product = sys.argv[1] + device = product[product.find("_") + 1:] or product + + if depsonly: + repo_path = get_from_manifest(device) + if repo_path: + fetch_dependencies(repo_path) + else: + print("Trying dependencies-only mode on a" + "non-existing device tree?") + sys.exit() + + print("Device {0} not found. Attempting to retrieve device repository from " + "{1} Github (http://github.com/{1}).".format(device, org_display)) + + githubreq = urllib.request.Request( + "https://api.github.com/search/repositories?" 
+ "q={0}+user:{1}+in:name+fork:true".format(device, org_display)) + add_auth(githubreq) + + repositories = [] + + try: + result = json.loads(urllib.request.urlopen(githubreq).read().decode()) + except urllib.error.URLError: + print("Failed to search GitHub") + sys.exit() + except ValueError: + print("Failed to parse return data from GitHub") + sys.exit() + for res in result.get('items', []): + repositories.append(res) + for repository in repositories: repo_name = repository['name'] - if repo_name.startswith("device_") and repo_name.endswith("_" + device): - print("Found repository: %s" % repository['name']) - - manufacturer = repo_name.replace("device_", "").replace("_" + device, "") - - calc_revision = get_revision() - default_revision = get_default_revision() - print("Default revision: %s" % default_revision) - print("Checking branch info") - githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', '')) - add_auth(githubreq) - result = json.loads(urllib.request.urlopen(githubreq).read().decode()) - - ## Try tags, too, since that's what releases use - if not (has_branch(result, calc_revision) or has_branch(result, default_revision)): - githubreq = urllib.request.Request(repository['tags_url'].replace('{/tag}', '')) - add_auth(githubreq) - result.extend(json.loads(urllib.request.urlopen(githubreq).read().decode())) - - repo_path = "device/%s/%s" % (manufacturer, device) - adding = {'repository':repo_name,'target_path':repo_path} - - fallback_branch = None - if calc_revision != default_revision and has_branch(result, calc_revision): - fallback_branch = calc_revision - if not fallback_branch and not has_branch(result, default_revision): - if os.getenv('ROOMSERVICE_BRANCHES'): - fallbacks = list(filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' '))) - for fallback in fallbacks: - if has_branch(result, fallback): - print("Using fallback branch: %s" % fallback) - fallback_branch = fallback - break - - if not fallback_branch: - 
print("Default revision %s not found in %s. Bailing." % (default_revision, repo_name)) - print("Branches found:") - for branch in [branch['name'] for branch in result]: - print(branch) - print("Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches.") - sys.exit() - - add_to_manifest([adding], fallback_branch) - - print("Syncing repository to retrieve project.") - os.system('repo sync %s' % repo_path) - print("Repository synced!") - - fetch_dependencies(repo_path, fallback_branch) - print("Done") - sys.exit() - -print("Repository for %s not found in the SlimRoms Github repository list. If this is in error, you may need to manually add it to your local_manifests/slim_manifest.xml." % device) + if not (repo_name.startswith("device_") and + repo_name.endswith("_" + device)): + continue + print("Found repository: %s" % repository['name']) + + fallback_branch = detect_revision(repository) + manufacturer = repo_name[7:-(len(device)+1)] + repo_path = "device/%s/%s" % (manufacturer, device) + adding = [{'repository': repo_name, 'target_path': repo_path}] + + add_to_manifest(adding, fallback_branch) + + print("Syncing repository to retrieve project.") + os.system('repo sync %s' % repo_path) + print("Repository synced!") + + fetch_dependencies(repo_path, fallback_branch) + print("Done") + sys.exit() + + print("Repository for %s not found in the %s Github repository list." + % (device, org_display)) + print("If this is in error, you may need to manually add it to your " + "%s" % custom_local_manifest) +if __name__ == "__main__": + main() From 1383c7067da568c868ce17618703ae9de704db69 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Wed, 29 Apr 2015 16:35:36 +0100 Subject: [PATCH 232/309] roomservice: check against the path, not name For a while now, it's projects have been allowed to fetch from the same location. The only reliable way to check is with the target path, so we use this now. 
Change-Id: Ia88bd3a5a56d5de2110f1c0d3ce332060732f716 --- tools/roomservice.py | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index cde9bea04..01f4cce30 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -72,13 +72,6 @@ def add_auth(g_req): g_req.add_header("Authorization", "Basic %s" % github_auth) -def exists_in_tree(lm, repo): - for child in lm.getchildren(): - if child.attrib['name'].endswith(repo): - return True - return False - - def indent(elem, level=0): # in-place prettyprint formatter i = "\n" + " " * level @@ -125,11 +118,11 @@ def get_remote(manifest=None, remote_name=None): return remote -def get_revision(manifest=None, p="android_build"): +def get_revision(manifest=None, p="build"): m = manifest or load_manifest(default_manifest) project = None for proj in m.findall('project'): - if proj.get('name').endswith(p): + if proj.get('path').strip('/') == p: project = proj break if project is None: @@ -148,17 +141,17 @@ def get_from_manifest(device_name): for man in (custom_local_manifest, default_manifest): man = load_manifest(man) for local_path in man.findall("project"): - lp = local_path.get("name") - if lp.startswith("device_") and lp.endswith("_" + device_name): - return local_path.get("path") + lp = local_path.get("path").strip('/') + if lp.startswith("device/") and lp.endswith("/" + device_name): + return lp return None -def is_in_manifest(project_name): +def is_in_manifest(project_path): for man in (custom_local_manifest, default_manifest): man = load_manifest(man) for local_path in man.findall("project"): - if local_path.get("name") == project_name: + if local_path.get("path") == project_path: return True return False @@ -169,8 +162,8 @@ def add_to_manifest(repos, fallback_branch=None): for repo in repos: repo_name = repo['repository'] repo_target = repo['target_path'] - if exists_in_tree(lm, repo_name): - print('%s already exists' % 
repo_name) + if is_in_manifest(repo_target): + print('already exists: %s' % repo_target) continue if "/" not in repo_name: @@ -226,10 +219,7 @@ def fetch_dependencies(repo_path, fallback_branch=None): syncable_repos = [] for dependency in dependencies: - repo_name = dependency['repository'] - if '/' not in repo_name: - repo_name = os.path.join(org_manifest, repo_name) - if not is_in_manifest(repo_name): + if not is_in_manifest(dependency['target_path']): fetch_list.append(dependency) syncable_repos.append(dependency['target_path']) From 6abd41029a52242802467105bf38ec9d21f5307b Mon Sep 17 00:00:00 2001 From: Anthony King Date: Wed, 29 Apr 2015 21:08:33 +0100 Subject: [PATCH 233/309] roomservice: add branch to deps if not provided this matches the behaviour (more or less) as if they were added in normal mode Change-Id: I1fcef29c8c34d2f74eb8e184a272610e02220ad8 --- tools/roomservice.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index 01f4cce30..50abe8864 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -220,6 +220,9 @@ def fetch_dependencies(repo_path, fallback_branch=None): for dependency in dependencies: if not is_in_manifest(dependency['target_path']): + if not dependency.get('branch'): + dependency['branch'] = get_revision() + fetch_list.append(dependency) syncable_repos.append(dependency['target_path']) @@ -296,7 +299,7 @@ def detect_revision(repo): def main(): try: - depsonly = bool(sys.argv[2]) + depsonly = bool(sys.argv[2] in ['true', 1]) except IndexError: depsonly = False From 471e51f8acce1d3ae1be14e4c7d18dcd250b1777 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Wed, 29 Apr 2015 21:29:17 +0100 Subject: [PATCH 234/309] roomservice: touchup revision assignment don't assign the revision if the calculated one is the same as the default one. 
fallback to the custom_default_revision if can't calculate a revision in deps only mode Change-Id: I5f180ecf766b95eb41a716234d4e42d7d4e4a942 --- tools/roomservice.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index 50abe8864..2c586aad7 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -221,7 +221,8 @@ def fetch_dependencies(repo_path, fallback_branch=None): for dependency in dependencies: if not is_in_manifest(dependency['target_path']): if not dependency.get('branch'): - dependency['branch'] = get_revision() + dependency['branch'] = (get_revision() or + custom_default_revision) fetch_list.append(dependency) syncable_repos.append(dependency['target_path']) @@ -259,7 +260,7 @@ def detect_revision(repo): print("Default revision: %s" % default_revision) - if has_branch(result, calc_revision): + if calc_revision != default_revision and has_branch(result, calc_revision): return calc_revision if has_branch(result, default_revision): return None @@ -271,7 +272,7 @@ def detect_revision(repo): result.extend(json.loads( urllib.request.urlopen(githubreq).read().decode())) - if has_branch(result, calc_revision): + if calc_revision != default_revision and has_branch(result, calc_revision): return calc_revision if has_branch(result, default_revision): return None From 23798257480ebccc59951ea1fb81033f6027bf1f Mon Sep 17 00:00:00 2001 From: Anthony King Date: Thu, 30 Apr 2015 23:29:25 +0100 Subject: [PATCH 235/309] roomservice: add debug method also don't chatter about Dependencies file not found if not in debug mode Change-Id: I7677b396d0c62862d7852b9da7e2fa73f44e0bf5 --- tools/roomservice.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index 2c586aad7..7e4e25275 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -39,6 +39,7 @@ urllib.parse = urlparse urllib.request = urllib2 +DEBUG = False 
default_manifest = ".repo/manifest.xml" custom_local_manifest = ".repo/local_manifests/slim_manifest.xml" @@ -55,6 +56,11 @@ os.makedirs(local_manifests) +def debug(*args, **kwargs): + if DEBUG: + print(*args, **kwargs) + + def add_auth(g_req): global github_auth if github_auth is None: @@ -213,7 +219,7 @@ def fetch_dependencies(repo_path, fallback_branch=None): dependencies = json.load(dep_f) else: dependencies = {} - print('Dependencies file not found, bailing out.') + debug('Dependencies file not found, bailing out.') fetch_list = [] syncable_repos = [] @@ -299,11 +305,15 @@ def detect_revision(repo): def main(): + global DEBUG try: depsonly = bool(sys.argv[2] in ['true', 1]) except IndexError: depsonly = False + if os.getenv('ROOMSERVICE_DEBUG'): + DEBUG = True + product = sys.argv[1] device = product[product.find("_") + 1:] or product From 861d85b4a70e0b1b8a64a1e89cdbbf60dde6e8cf Mon Sep 17 00:00:00 2001 From: Josue Rivera Date: Tue, 6 Oct 2015 21:05:30 +0200 Subject: [PATCH 236/309] Update roomservice fallback branch Change-Id: I284e6370f19ab979569bfc70ddc1dd850640d339 Signed-off-by: Josue Rivera --- tools/roomservice.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index 7e4e25275..0afdb7a14 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -43,7 +43,7 @@ default_manifest = ".repo/manifest.xml" custom_local_manifest = ".repo/local_manifests/slim_manifest.xml" -custom_default_revision = "lp5.0" +custom_default_revision = "mm6.0" custom_dependencies = "slim.dependencies" org_manifest = "SlimRoms" # leave empty if org is provided in manifest org_display = "SlimRoms" # needed for displaying From 9cec20a21355f37b168d7538861842b92412d69b Mon Sep 17 00:00:00 2001 From: "Brint E. 
Kriebel" Date: Wed, 12 Aug 2015 14:05:03 -0700 Subject: [PATCH 237/309] roomservice: use force-sync when adding projects with roomservice This works around the error GitError: --force-sync not enabled; cannot overwrite a local work tree when using roomservice. Since this should only trigger when the device repos haven't been checked out yet, we can use force-sync to overwrite any roomservice device paths in the .repo directory. Change-Id: Iac54a8a2f2913f82f8ca6497b8785a9d5769640b Ticket: CYNGNOS-735 --- tools/roomservice.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/roomservice.py b/tools/roomservice.py index 0afdb7a14..5f5c237cb 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -239,7 +239,7 @@ def fetch_dependencies(repo_path, fallback_branch=None): if syncable_repos: print('Syncing dependencies') - os.system('repo sync %s' % ' '.join(syncable_repos)) + os.system('repo sync --force-sync %s' % ' '.join(syncable_repos)) for deprepo in syncable_repos: fetch_dependencies(deprepo) @@ -349,6 +349,7 @@ def main(): for repository in repositories: repo_name = repository['name'] + if not (repo_name.startswith("device_") and repo_name.endswith("_" + device)): continue @@ -362,7 +363,7 @@ def main(): add_to_manifest(adding, fallback_branch) print("Syncing repository to retrieve project.") - os.system('repo sync %s' % repo_path) + os.system('repo sync --force-sync %s' % repo_path) print("Repository synced!") fetch_dependencies(repo_path, fallback_branch) From 63fd4cd8ff0ac1154cc573889fd502f5d2555dd3 Mon Sep 17 00:00:00 2001 From: Nicholas Flintham Date: Mon, 8 Dec 2014 15:02:34 +0000 Subject: [PATCH 238/309] Kill Music build. This needs to die in a fire.. 
Change-Id: I865724c9e1da9014a18683247d93ac45f97b8eab --- target/product/generic_no_telephony.mk | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/target/product/generic_no_telephony.mk b/target/product/generic_no_telephony.mk index 0f9935a20..0194446b9 100644 --- a/target/product/generic_no_telephony.mk +++ b/target/product/generic_no_telephony.mk @@ -22,7 +22,8 @@ PRODUCT_PACKAGES := \ BluetoothMidiService \ Camera2 \ Gallery2 \ - Music \ + Email \ + Exchange2 \ MusicFX \ OneTimeInitializer \ Provision \ From 495e92ec7f1a7f1961628b90722c7de119fe1de0 Mon Sep 17 00:00:00 2001 From: Nicholas Flintham Date: Fri, 16 Oct 2015 13:44:22 +0100 Subject: [PATCH 239/309] build: remove remote methods as they are now in vendor and cmgerrit/cmrebase commands Change-Id: I91d409de764632d4418645ea1a42399e2bd3c5ca --- envsetup.sh | 339 ---------------------------------------------------- 1 file changed, 339 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index c27ea51d6..a684f8337 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -22,11 +22,6 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y - sepgrep: Greps on all local sepolicy files. - sgrep: Greps on all local source files. - godir: Go to the directory containing a file. -- cmremote: Add git remote for CM Gerrit Review -- cmgerrit: A Git wrapper that fetches/pushes patch from/to CM Gerrit Review -- cmrebase: Rebase a Gerrit change and push it again -- aospremote: Add git remote for matching AOSP repository -- cafremote: Add git remote for matching CodeAurora repository. - mka: Builds using SCHED_BATCH on all processors - mkap: Builds the module(s) using mka and pushes them to the device. - cmka: Cleans and builds using mka. @@ -1665,49 +1660,6 @@ function godir () { \cd $T/$pathname } -function cmremote() -{ - git remote rm cmremote 2> /dev/null - if [ ! -d .git ] - then - echo .git directory not found. 
Please run this from the root directory of the Android repository you wish to set up. - fi - GERRIT_REMOTE=$(cat .git/config | grep git://github.com | awk '{ print $NF }' | sed s#git://github.com/##g) - if [ -z "$GERRIT_REMOTE" ] - then - GERRIT_REMOTE=$(cat .git/config | grep http://github.com | awk '{ print $NF }' | sed s#http://github.com/##g) - if [ -z "$GERRIT_REMOTE" ] - then - echo Unable to set up the git remote, are you in the root of the repo? - return 0 - fi - fi - CMUSER=`git config --get review.review.cyanogenmod.com.username` - if [ -z "$CMUSER" ] - then - git remote add cmremote ssh://review.cyanogenmod.com:29418/$GERRIT_REMOTE - else - git remote add cmremote ssh://$CMUSER@review.cyanogenmod.com:29418/$GERRIT_REMOTE - fi - echo You can now push to "cmremote". -} - -function aospremote() -{ - git remote rm aosp 2> /dev/null - if [ ! -d .git ] - then - echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up. - fi - PROJECT=`pwd -P | sed s#$ANDROID_BUILD_TOP/##g` - if (echo $PROJECT | grep -qv "^device") - then - PFX="platform/" - fi - git remote add aosp https://android.googlesource.com/$PFX$PROJECT - echo "Remote 'aosp' created" -} - function makerecipe() { if [ -z "$1" ] then @@ -1730,280 +1682,6 @@ function makerecipe() { ' } -function cmgerrit() { - if [ $# -eq 0 ]; then - $FUNCNAME help - return 1 - fi - local user=`git config --get review.review.cyanogenmod.com.username` - local review=`git config --get remote.github.review` - local project=`git config --get remote.github.projectname` - local command=$1 - shift - case $command in - help) - if [ $# -eq 0 ]; then - cat <&2 "Gerrit username not found." 
- return 1 - fi - local local_branch remote_branch - case $1 in - *:*) - local_branch=${1%:*} - remote_branch=${1##*:} - ;; - *) - local_branch=HEAD - remote_branch=$1 - ;; - esac - shift - git push $@ ssh://$user@$review:29418/$project \ - $local_branch:refs/for/$remote_branch || return 1 - ;; - changes|for) - if [ "$FUNCNAME" = "cmgerrit" ]; then - echo >&2 "'$FUNCNAME $command' is deprecated." - fi - ;; - __cmg_err_no_arg) - if [ $# -lt 2 ]; then - echo >&2 "'$FUNCNAME $command' missing argument." - elif [ $2 -eq 0 ]; then - if [ -n "$3" ]; then - $FUNCNAME help $1 - else - echo >&2 "'$FUNCNAME $1' missing argument." - fi - else - return 1 - fi - ;; - __cmg_err_not_repo) - if [ -z "$review" -o -z "$project" ]; then - echo >&2 "Not currently in any reviewable repository." - else - return 1 - fi - ;; - __cmg_err_not_supported) - $FUNCNAME __cmg_err_no_arg $command $# && return - case $1 in - #TODO: filter more git commands that don't use refname - init|add|rm|mv|status|clone|remote|bisect|config|stash) - echo >&2 "'$FUNCNAME $1' is not supported." - ;; - *) return 1 ;; - esac - ;; - #TODO: other special cases? 
- *) - $FUNCNAME __cmg_err_not_supported $command && return 1 - $FUNCNAME __cmg_err_no_arg $command $# help && return 1 - $FUNCNAME __cmg_err_not_repo && return 1 - local args="$@" - local change pre_args refs_arg post_args - case "$args" in - *--\ *) - pre_args=${args%%-- *} - post_args="-- ${args#*-- }" - ;; - *) pre_args="$args" ;; - esac - args=($pre_args) - pre_args= - if [ ${#args[@]} -gt 0 ]; then - change=${args[${#args[@]}-1]} - fi - if [ ${#args[@]} -gt 1 ]; then - pre_args=${args[0]} - for ((i=1; i<${#args[@]}-1; i++)); do - pre_args="$pre_args ${args[$i]}" - done - fi - while ((1)); do - case $change in - ""|--) - $FUNCNAME help $command - return 1 - ;; - *@*) - if [ -z "$refs_arg" ]; then - refs_arg="@${change#*@}" - change=${change%%@*} - fi - ;; - *~*) - if [ -z "$refs_arg" ]; then - refs_arg="~${change#*~}" - change=${change%%~*} - fi - ;; - *^*) - if [ -z "$refs_arg" ]; then - refs_arg="^${change#*^}" - change=${change%%^*} - fi - ;; - *:*) - if [ -z "$refs_arg" ]; then - refs_arg=":${change#*:}" - change=${change%%:*} - fi - ;; - *) break ;; - esac - done - $FUNCNAME fetch $change \ - && git $command $pre_args FETCH_HEAD$refs_arg $post_args \ - || return 1 - ;; - esac -} - -function cmrebase() { - local repo=$1 - local refs=$2 - local pwd="$(pwd)" - local dir="$(gettop)/$repo" - - if [ -z $repo ] || [ -z $refs ]; then - echo "CyanogenMod Gerrit Rebase Usage: " - echo " cmrebase " - echo " The patch IDs appear on the Gerrit commands that are offered." - echo " They consist on a series of numbers and slashes, after the text" - echo " refs/changes. For example, the ID in the following command is 26/8126/2" - echo "" - echo " git[...]ges_apps_Camera refs/changes/26/8126/2 && git cherry-pick FETCH_HEAD" - echo "" - return - fi - - if [ ! -d $dir ]; then - echo "Directory $dir doesn't exist in tree." - return - fi - cd $dir - repo=$(cat .git/config | grep git://github.com | awk '{ print $NF }' | sed s#git://github.com/##g) - echo "Starting branch..." 
- repo start tmprebase . - echo "Bringing it up to date..." - repo sync . - echo "Fetching change..." - git fetch "http://review.cyanogenmod.com/p/$repo" "refs/changes/$refs" && git cherry-pick FETCH_HEAD - if [ "$?" != "0" ]; then - echo "Error cherry-picking. Not uploading!" - return - fi - echo "Uploading..." - repo upload . - echo "Cleaning up..." - repo abandon tmprebase . - cd $pwd -} - function mka() { case `uname -s` in Darwin) @@ -2076,23 +1754,6 @@ function fixup_common_out_dir() { fi } -function cafremote() -{ - git remote rm caf 2> /dev/null - if [ ! -d .git ] - then - echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up. - fi - PROJECT=`pwd -P | sed s#$ANDROID_BUILD_TOP/##g` - if (echo $PROJECT | grep -qv "^device") - then - PFX="platform/" - fi - git remote add caf git://codeaurora.org/$PFX$PROJECT - echo "Remote 'caf' created" -} - - function installboot() { if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ]; From cf73d0fb0ce2e23d8eb597d61e9868ba35dfe0ef Mon Sep 17 00:00:00 2001 From: Anthony King Date: Mon, 4 May 2015 13:38:53 +0100 Subject: [PATCH 240/309] envsetup: hmm: Put descriptions into an array this allows finer grain manipulation at a later point Change-Id: I0e7b9764ba90a39da8935ee46905cf9a9e571560 --- envsetup.sh | 79 +++++++++++++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 39 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index a684f8337..cb04839b1 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1,44 +1,45 @@ + +export HMM_DESCRIPTIVE=( +"lunch: lunch -" +"tapas: tapas [ ...] [arm|x86|mips|armv5|arm64|x86_64|mips64] [eng|userdebug|user]" +"croot: Changes directory to the top of the tree." +"cout: Changes directory to out." +"m: Makes from the top of the tree." +"mm: Builds all of the modules in the current directory, but not their dependencies." +"mmm: Builds all of the modules in the supplied directories, but not their dependencies. 
+ To limit the modules being built use the syntax: mmm dir/:target1,target2." +"mma: Builds all of the modules in the current directory, and their dependencies." +"mmp: Builds all of the modules in the current directory and pushes them to the device." +"mmmp: Builds all of the modules in the supplied directories and pushes them to the device." +"mmma: Builds all of the modules in the supplied directories, and their dependencies." +"cgrep: Greps on all local C/C++ files." +"ggrep: Greps on all local Gradle files." +"jgrep: Greps on all local Java files." +"resgrep: Greps on all local res/*.xml files." +"mangrep: Greps on all local AndroidManifest.xml files." +"sepgrep: Greps on all local sepolicy files." +"sgrep: Greps on all local source files." +"godir: Go to the directory containing a file." +"mka: Builds using SCHED_BATCH on all processors" +"mkap: Builds the module(s) using mka and pushes them to the device." +"cmka: Cleans and builds using mka." +"repolastsync: Prints date and time of last repo sync" +"reposync: Parallel repo sync using ionice and SCHED_BATCH" +"repopick: Utility to fetch changes from Gerrit." +"installboot: Installs a boot.img to the connected device." +"installrecovery: Installs a recovery.img to the connected device." +) + function hmm() { -cat <- -- tapas: tapas [ ...] [arm|x86|mips|armv5|arm64|x86_64|mips64] [eng|userdebug|user] -- croot: Changes directory to the top of the tree. -- cout: Changes directory to out. -- m: Makes from the top of the tree. -- mm: Builds all of the modules in the current directory, but not their dependencies. -- mmm: Builds all of the modules in the supplied directories, but not their dependencies. - To limit the modules being built use the syntax: mmm dir/:target1,target2. -- mma: Builds all of the modules in the current directory, and their dependencies. -- mmp: Builds all of the modules in the current directory and pushes them to the device. 
-- mmmp: Builds all of the modules in the supplied directories and pushes them to the device. -- mmma: Builds all of the modules in the supplied directories, and their dependencies. -- cgrep: Greps on all local C/C++ files. -- ggrep: Greps on all local Gradle files. -- jgrep: Greps on all local Java files. -- repopick: Utility to fetch changes from Gerrit. -- resgrep: Greps on all local res/*.xml files. -- mangrep: Greps on all local AndroidManifest.xml files. -- sepgrep: Greps on all local sepolicy files. -- sgrep: Greps on all local source files. -- godir: Go to the directory containing a file. -- mka: Builds using SCHED_BATCH on all processors -- mkap: Builds the module(s) using mka and pushes them to the device. -- cmka: Cleans and builds using mka. -- repolastsync: Prints date and time of last repo sync. -- reposync: Parallel repo sync using ionice and SCHED_BATCH -- installboot: Installs a boot.img to the connected device. -- installrecovery: Installs a recovery.img to the connected device. - - -Environemnt options: -- SANITIZE_HOST: Set to 'true' to use ASAN for all host modules. Note that - ASAN_OPTIONS=detect_leaks=0 will be set by default until the - build is leak-check clean. - -Look at the source to view more functions. The complete list is: -EOF T=$(gettop) + + echo "Invoke \". build/envsetup.sh\" from your shell to add the following functions to your environment:" + for c in ${!HMM_DESCRIPTIVE[*]}; do + echo -e "- ${HMM_DESCRIPTIVE[$c]}" + done + + echo + echo "Look at the source to view more functions. 
The complete list is:" for i in `cat $T/build/envsetup.sh | sed -n "/^[ \t]*function /s/function \([a-z_]*\).*/\1/p" | sort | uniq`; do echo "$i" done | column From c4f19a0e06b15bab5580953ba53e76a890d41596 Mon Sep 17 00:00:00 2001 From: Nicholas Flintham Date: Fri, 16 Oct 2015 14:04:48 +0100 Subject: [PATCH 241/309] update releasekey paths Change-Id: Iced3d549d149abe1b6ff52e0e8dccf6f13a9f240 --- tools/releasetools/edify_generator.py | 2 +- tools/releasetools/ota_from_target_files.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index d7ecb98ec..0c34fc0e9 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -151,7 +151,7 @@ def RunBackup(self, command): self.script.append(('run_program("/tmp/install/bin/backuptool.sh", "%s");' % command)) def ValidateSignatures(self, command): - self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");') + self.script.append('package_extract_file("META-INF/org/slimroms/releasekey", "/tmp/releasekey");') # Exit code 124 == abort. run_program returns raw, so left-shift 8bit self.script.append('run_program("/tmp/install/bin/otasigcheck.sh") != "31744" || abort("Can\'t install this package on top of incompatible data. 
Please try another package or run a factory reset");') diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py index 06ec700ae..29ceaff95 100755 --- a/tools/releasetools/ota_from_target_files.py +++ b/tools/releasetools/ota_from_target_files.py @@ -760,7 +760,7 @@ def output_sink(fn, data): common.ZipWriteStr(output_zip, "system/build.prop", ""+input_zip.read("SYSTEM/build.prop")) - common.ZipWriteStr(output_zip, "META-INF/org/cyanogenmod/releasekey", + common.ZipWriteStr(output_zip, "META-INF/org/slimroms/releasekey", ""+input_zip.read("META/releasekey.txt")) def WritePolicyConfig(file_name, output_zip): From 2d688fccfab02b7aca3576e5ae847158019b4841 Mon Sep 17 00:00:00 2001 From: LuK1337 Date: Wed, 14 Oct 2015 21:40:09 +0200 Subject: [PATCH 242/309] build: Allow devices to specify a WLAN variant Change-Id: I59616fa641c4ccddea7eb5fb77e9aeb0423ef234 --- core/qcom_target.mk | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 17c93354e..117ec5c66 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -7,6 +7,9 @@ endef define ril-set-path-variant $(call project-set-path-variant,ril,TARGET_RIL_VARIANT,hardware/$(1)) endef +define wlan-set-path-variant +$(call project-set-path-variant,wlan,TARGET_WLAN_VARIANT,hardware/qcom/$(1)) +endef define gps-hal-set-path-variant $(call project-set-path-variant,gps-hal,TARGET_GPS_HAL_PATH,$(1)) endef @@ -69,6 +72,7 @@ $(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media-caf/$(QCOM_HARDWARE_VARIANT)) $(call qcom-set-path-variant,SENSORS,sensors) $(call ril-set-path-variant,ril) +$(call wlan-set-path-variant,wlan) $(call loc-api-set-path-variant,vendor/qcom/opensource/location) $(call gps-hal-set-path-variant,hardware/qcom/gps) else @@ -79,6 +83,7 @@ $(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media/default) $(call 
qcom-set-path-variant,SENSORS,sensors) $(call ril-set-path-variant,ril) +$(call wlan-set-path-variant,wlan) $(call loc-api-set-path-variant,vendor/qcom/opensource/location) $(call gps-hal-set-path-variant,hardware/qcom/gps) endif From 3a06ea3edabeac38f6357423cec866634bcaa103 Mon Sep 17 00:00:00 2001 From: Adnan Begovic Date: Mon, 12 Oct 2015 11:59:22 -0700 Subject: [PATCH 243/309] build: Fix BOARD_CUSTOM_BOOTIMG_MK for recovery target. Change-Id: I3daf0b6288421c3880c8b3a71c15bafb87ae3df3 --- core/Makefile | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/core/Makefile b/core/Makefile index e17312495..afbdefd8a 100644 --- a/core/Makefile +++ b/core/Makefile @@ -955,15 +955,17 @@ define build-recoveryimage-target $(hide) cp $(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys $(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \ > $(TARGET_RECOVERY_ROOT_OUT)/default.prop - $(hide) $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) - $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ - $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ - $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) - $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)),\ - $(BOOT_SIGNER) /recovery $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)) - $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ - $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)) - $(hide) $(call 
assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) + $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) + + $(if $(filter $(BOARD_CUSTOM_BOOTIMG_MK),), + $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ + $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ + $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) + $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)),\ + $(BOOT_SIGNER) /recovery $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)) + $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ + $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)) + $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE))) @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} endef @@ -971,11 +973,11 @@ $(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) @echo -e ${CL_CYN}"----- Making uncompressed recovery ramdisk ------"${CL_RST} $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $@ -$(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ +$(recovery_ramdisk): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ $(INSTALLED_RAMDISK_TARGET) \ $(INSTALLED_BOOTIMAGE_TARGET) \ $(INTERNAL_RECOVERYIMAGE_FILES) \ - $(recovery_initrc) $(recovery_sepolicy) $(recovery_kernel) \ + $(recovery_initrc) $(recovery_sepolicy) \ $(INSTALLED_2NDBOOTLOADER_TARGET) \ $(recovery_build_prop) $(recovery_resource_deps) $(recovery_root_deps) \ $(recovery_fstab) \ @@ -983,15 +985,7 @@ 
$(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ $(call build-recoveryimage-target, $@) ifndef BOARD_CUSTOM_BOOTIMG_MK -$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTIMG) $(recovery_ramdisk) \ - $(recovery_kernel) - $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@ -ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY)) - $(BOOT_SIGNER) /recovery $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $@ -endif - $(hide) $(call assert-max-image-size,$@,$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) - @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} - +$(INSTALLED_RECOVERYIMAGE_TARGET): $(recovery_ramdisk) $(recovery_kernel) endif # BOARD_CUSTOM_BOOTIMG_MK recovery_patch_path := $(call intermediates-dir-for,PACKAGING,recovery_patch) From aa26e6ea8a0420dc608994e81400ef1a0cb68894 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Fri, 16 Oct 2015 20:38:10 -0700 Subject: [PATCH 244/309] build: Add bt-vendor variant and fix wlan variant Change-Id: Ie3c433420504f0f14783f99e2d5fcdd46f1a3f37 --- core/qcom_target.mk | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 117ec5c66..b9314d3e3 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -10,6 +10,9 @@ endef define wlan-set-path-variant $(call project-set-path-variant,wlan,TARGET_WLAN_VARIANT,hardware/qcom/$(1)) endef +define bt-vendor-set-path-variant +$(call project-set-path-variant,bt-vendor,TARGET_BT_VENDOR_VARIANT,hardware/qcom/$(1)) +endef define gps-hal-set-path-variant $(call project-set-path-variant,gps-hal,TARGET_GPS_HAL_PATH,$(1)) endef @@ -72,7 +75,8 @@ $(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media-caf/$(QCOM_HARDWARE_VARIANT)) $(call qcom-set-path-variant,SENSORS,sensors) $(call ril-set-path-variant,ril) -$(call wlan-set-path-variant,wlan) 
+$(call wlan-set-path-variant,wlan-caf) +$(call bt-vendor-set-path-variant,bt-caf) $(call loc-api-set-path-variant,vendor/qcom/opensource/location) $(call gps-hal-set-path-variant,hardware/qcom/gps) else @@ -84,6 +88,7 @@ $(call project-set-path,qcom-media,hardware/qcom/media/default) $(call qcom-set-path-variant,SENSORS,sensors) $(call ril-set-path-variant,ril) $(call wlan-set-path-variant,wlan) +$(call bt-vendor-set-path-variant,bt) $(call loc-api-set-path-variant,vendor/qcom/opensource/location) $(call gps-hal-set-path-variant,hardware/qcom/gps) endif From 8da0778104779ddbe2ddc51515bb42e60d9064dc Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Sat, 17 Oct 2015 02:29:25 -0400 Subject: [PATCH 245/309] build: Actually build the recovery.img if !BOARD_CUSTOM_BOOTIMG_MK Fix the BOARD_CUSTOM_BOOTIMG_MK if and call build of recovery.img also removed unused variable RECOVERYIMAGE_EXTRA_DEPS until associated commit is merged Change-Id: I94f5f26bc01e03d761ccc260342f330a4a06b141 --- core/Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/Makefile b/core/Makefile index afbdefd8a..0a6a8abce 100644 --- a/core/Makefile +++ b/core/Makefile @@ -957,7 +957,7 @@ define build-recoveryimage-target > $(TARGET_RECOVERY_ROOT_OUT)/default.prop $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) - $(if $(filter $(BOARD_CUSTOM_BOOTIMG_MK),), + $(if !$(BOARD_CUSTOM_BOOTIMG_MK), $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) @@ -973,7 +973,7 @@ $(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) @echo -e ${CL_CYN}"----- Making uncompressed recovery ramdisk ------"${CL_RST} $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $@ -$(recovery_ramdisk): 
$(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_DEPS) \ +$(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) \ $(INSTALLED_RAMDISK_TARGET) \ $(INSTALLED_BOOTIMAGE_TARGET) \ $(INTERNAL_RECOVERYIMAGE_FILES) \ @@ -982,10 +982,10 @@ $(recovery_ramdisk): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) $(RECOVERYIMAGE_EXTRA_ $(recovery_build_prop) $(recovery_resource_deps) $(recovery_root_deps) \ $(recovery_fstab) \ $(RECOVERY_INSTALL_OTA_KEYS) - $(call build-recoveryimage-target, $@) ifndef BOARD_CUSTOM_BOOTIMG_MK -$(INSTALLED_RECOVERYIMAGE_TARGET): $(recovery_ramdisk) $(recovery_kernel) +$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTIMG) $(recovery_ramdisk) $(recovery_kernel) + $(call build-recoveryimage-target, $@) endif # BOARD_CUSTOM_BOOTIMG_MK recovery_patch_path := $(call intermediates-dir-for,PACKAGING,recovery_patch) From fcdf6ca4d1fa7b8a791c5e7bab084aa858809b94 Mon Sep 17 00:00:00 2001 From: Abhinav1997 Date: Sun, 18 Oct 2015 19:49:13 +0200 Subject: [PATCH 246/309] envsetup: Show error when supplied dir isn't present with mmm When a directory isn't present with mmm, don't show "No Android.mk present", rather show that the directory isn't present Change-Id: I7259a60012c6f30c470daa60d5a5097d01ffc4c7 Signed-off-by: Abhinav1997 --- envsetup.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index cb04839b1..58593a6bc 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -919,7 +919,12 @@ function mmm() case $DIR in showcommands | snod | dist | incrementaljavac | *=*) ARGS="$ARGS $DIR";; GET-INSTALL-PATH) GET_INSTALL_PATH=$DIR;; - *) echo "No Android.mk in $DIR."; return 1;; + *) if [ -d $DIR ]; then + echo "No Android.mk in $DIR."; + else + echo "Couldn't locate the directory $DIR"; + fi + return 1;; esac fi done From 4ef703892969ef90fd8551a7e6e21f3cc6a2bf1f Mon Sep 17 00:00:00 2001 From: Gabriele M Date: Wed, 21 Oct 2015 23:29:05 +0200 Subject: [PATCH 247/309] build: Fix check on BOARD_CUSTOM_BOOTIMG_MK In the current code, '!' 
is added as prefix to the string contained in $(BOARD_CUSTOM_BOOTIMG_MK). This means that !$(BOARD_CUSTOM_BOOTIMG_MK) is never an empty string and therefore the condition is always true. The if function has a third optional argument that is evaluated when the condition is false, so use it to fix the current code. Change-Id: If1e459f6c40f655b47a12b77761921c943964e3b --- core/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/Makefile b/core/Makefile index 0a6a8abce..67f207347 100644 --- a/core/Makefile +++ b/core/Makefile @@ -957,7 +957,7 @@ define build-recoveryimage-target > $(TARGET_RECOVERY_ROOT_OUT)/default.prop $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) - $(if !$(BOARD_CUSTOM_BOOTIMG_MK), + $(if $(BOARD_CUSTOM_BOOTIMG_MK),, $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) From 5de25916bbf5b2033302a4932b3c28aa3cf1604b Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Wed, 21 Oct 2015 23:56:59 +0300 Subject: [PATCH 248/309] More custom mkbootimg cleanup * Prevent duplicate 'Made recovery image:' message when using custom mkbootimg Change-Id: I178321bcb5661e07833b1b56b5c9e2cf20e421d7 --- core/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/Makefile b/core/Makefile index 67f207347..fa7bc1c01 100644 --- a/core/Makefile +++ b/core/Makefile @@ -583,7 +583,7 @@ bootimage-nodeps: $(MKBOOTIMG) $(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE)) @echo -e ${CL_INS}"Made boot image: $@"${CL_RST} - endif # PRODUCT_SUPPORTS_VERITY + endif # PRODUCT_SUPPORTS_VBOOT endif # PRODUCT_SUPPORTS_BOOT_SIGNER / BOARD_CUSTOM_BOOTIMG_MK else # TARGET_NO_KERNEL From 
b05ac70fee7ffa092fdb49d95cc4284127b8ddd4 Mon Sep 17 00:00:00 2001 From: Roman Birg Date: Fri, 16 Oct 2015 10:29:06 -0700 Subject: [PATCH 249/309] fix success/failure coloring for darwin what's the point of OS X if it's not pretty Change-Id: I7a1ed0766adead46b46092acf826c6ef270edd10 Signed-off-by: Roman Birg --- envsetup.sh | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 58593a6bc..859aa5fae 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1972,9 +1972,9 @@ function pez { local retval=$? if [ $retval -ne 0 ] then - echo -e "\e[0;31mFAILURE\e[00m" + printf "\e[0;31mFAILURE\e[00m\n" else - echo -e "\e[0;32mSUCCESS\e[00m" + printf "\e[0;32mSUCCESS\e[00m\n" fi return $retval } @@ -1998,7 +1998,7 @@ function mk_timer() if [ -n "$ncolors" ] && [ $ncolors -ge 8 ]; then color_failed="\e[0;31m" color_success="\e[0;32m" - color_reset="\e[00m" + color_reset="\e[0m" else color_failed="" color_success="" @@ -2006,9 +2006,9 @@ function mk_timer() fi echo if [ $ret -eq 0 ] ; then - echo -n -e "${color_success}#### make completed successfully " + printf "${color_success}#### make completed successfully " else - echo -n -e "${color_failed}#### make failed to build some targets " + printf "${color_failed}#### make failed to build some targets " fi if [ $hours -gt 0 ] ; then printf "(%02g:%02g:%02g (hh:mm:ss))" $hours $mins $secs @@ -2017,8 +2017,7 @@ function mk_timer() elif [ $secs -gt 0 ] ; then printf "(%s seconds)" $secs fi - echo -e " ####${color_reset}" - echo + printf " ####${color_reset}\n\n" return $ret } From e06a644bf08bb50666e289ae13c09e9731bf93ba Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Tue, 8 Sep 2015 10:57:18 -0500 Subject: [PATCH 250/309] build: Separate commands in recovery foreach loops The foreach loops output a space separated list, not necessarily one command per line. Separate commands with semicolons. 
Notably, this fixes an issue with multiple device directories included in TARGET_RECOVERY_DEVICE_DIRS. Change-Id: I5ebdc84ecaaacabaea77b8b752141e2041622480 --- core/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/Makefile b/core/Makefile index fa7bc1c01..a0b7ab09c 100644 --- a/core/Makefile +++ b/core/Makefile @@ -947,11 +947,11 @@ define build-recoveryimage-target $(hide) cp -rf $(recovery_resources_common)/* $(TARGET_RECOVERY_ROOT_OUT)/res $(hide) cp -f $(recovery_font) $(TARGET_RECOVERY_ROOT_OUT)/res/images/font.png $(hide) $(foreach item,$(recovery_root_private), \ - cp -rf $(item) $(TARGET_RECOVERY_OUT)/) + cp -rf $(item) $(TARGET_RECOVERY_OUT)/;) $(hide) $(foreach item,$(recovery_resources_private), \ - cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/) + cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/;) $(hide) $(foreach item,$(recovery_fstab), \ - cp -f $(item) $(TARGET_RECOVERY_ROOT_OUT)/etc/recovery.fstab) + cp -f $(item) $(TARGET_RECOVERY_ROOT_OUT)/etc/recovery.fstab;) $(hide) cp $(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys $(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \ > $(TARGET_RECOVERY_ROOT_OUT)/default.prop From 2f00ea355d4c3d4be20f59d25e62aa731f33525d Mon Sep 17 00:00:00 2001 From: Adnan Begovic Date: Thu, 1 Oct 2015 15:37:55 -0700 Subject: [PATCH 251/309] build: Fix import build step. Change-Id: Id5a042bd12cf37eec7c856a22d5a7de0e175c2e7 --- core/apicheck_msg_current.txt | 67 +++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/core/apicheck_msg_current.txt b/core/apicheck_msg_current.txt index 2e065aeff..5ca7fa025 100644 --- a/core/apicheck_msg_current.txt +++ b/core/apicheck_msg_current.txt @@ -10,6 +10,73 @@ To make these errors go away, you have two choices: make update-api ^^^^^^^^^^^^^^^^^^ + CONGRATS YOU EARNED A QUAIL STAR! 
+ + M + MM + MMM + M.MM + MM M + 7M MM + MMMMM MMMMM + MMMMM .MMMMM + MMMM MMMM + MM M + MM M .M + M+M MMMM + .M++MM .MM + MM+++MM MM + 8NNNNN MM+++++MM + NNNN $Z8. MM+++++MM MM + MM $Z8M7IMNN+++++MM MM + .$$$D ~NNMNN+++++MM MMMM + INNNNM NMNM++++++M M M + NNO:NI=MM+++++++MM MM MM + 8M$MMMMMD?+++++++MM .MMMMMMMMMMMMMMM MMMMN MMMMM + M$$NMMMMMM$++++++++MMMMMMM=+++++++++++++MM MMMMM MMMMM + M77$IMMMMMN.,+++++++++++++++++++++++++++MM .MMMMM MMMMM + .??I8,?M777OM.?+++++++++++++++++++++++++MM MM MM + O==?M7MM$MMI7$.~M+++++++++++++++++++++++MM .M M + NMMM+~M??MMMMMMMMMMMI$$++++++++++++++++++++MM MMMM + MMMM++++MM~=+I$OMMMOO?7M$Z$$$+++++++++++++++++MM MM + NMMM++++++++~~MO~7$OM8O8OMZZ$Z$M$$M++++++++++++++MM7MMM MM + MMMM++++++++++++==D~M~:8N88MMOMMZDM$$Z$$M+++++++++++MM77777MMM +MMM+++++++++++++++~MM~~M $O,NM88MOMMZ$$MM$$$+++++++++MM777777777MMMM + MMM++++++++++++M~M~IMMMO888NMOMMOZM$ZZDZ$$+++++++MM7777777777777OMMZ + MMM+++++++++++~~M~~MDOOMMO8NOOOOZZ$$Z.Z$$M++++MM77777777777777777MMM + MMM++++++++M.Z, D+ 8O88M8D,OOMDZZ$D.$$$N+++M7MMMMMD77777777777777MMM + .MM+++++++MM:.D:ZMMM8888OOOOOOZZ$ND$$$M++MM777777MMMM7777777777777MMD + MMM+++++~M.$.M~,~7M8?MON MOOZZ$$N$$$M++MD777777777MMMM77777777777MMM + MM=+++=ZMZ.MM MMZOOOO88OOZM$M.$$$$+++M7777777777777MMMM7777777777MM + MMM++MM~,,$M.+~M$OOMOOMZMI$$$$$$$++MM7777777777777777MMM777777777MM + MM++++=. 
~$$.$.M~M$MZOM7MMZ$$$$$$++MMMMMMD7777777777777MMMI7777777MMM + .M++++++MM+OMI$7M??N+OZM8MMMD$$M$$++M77777MMMMN77777777777MMM7777777MMM + M++++++++M+=?+++++++++++MNMZN$$N$$+MM777777777MMMM7777777777MMM777777MM, + M+++++M=?7$$M+++++++++++++++$NO$$$$+M7777777777777MMMM777777777MMM77777MM + M++~M$M$M+++++M++MMM++++++++++M=$$D$MMMMMMMM7777777777MMM$7777777MMM77777MM + M+M$$$M+++++++++MM MMMMM+++++++M$Z$$M MMMMMI7777777MMMM7777777MM77777MM + M++7NMIN++Z++NMM MMMMM+++N$M$M MMMM7777777MMM777777MM$777MM + M=++8+++++++MM MMMMMZ$M$M MMMM777777MMM77777MMZ777MM + MM++++++++MM MM$ MMM77777MMM77777MM7777MM + MM++++++MM MMMM7777MMM7777MM777MM + MM++++MMM .MMM7777MM7777MM77$M + MM+++MM M MMM777MMN777MM77MM + NM+MM M MMM77MMM77NMM7MM + MM MM MMM77MMM77MM77M + .MMM MMM7MMM7IMM7MM + MM M MMM7MMM7MM7MM + M MM MM7MMN7MMMM + MMMM MMMM MMMMMIMMMM + MMMM. MMMMM MMMMMMMMM + MMMMM MMMMM MMMMMMMM + MM MM OMMMMMM + M MM MMMMMM + MM M MMMMM + MMM MMM + MM MM + M + + NO. NO. STOP BEING LAZY. SERIOUSLY. DO NOT DO THIS in CM. THIS IS A LIE. IT WILL BREAK THINGS. From c7b17eca7acca6f9271b21a169bf31fdd5dd04a1 Mon Sep 17 00:00:00 2001 From: Steve Kondik Date: Fri, 10 Jul 2015 00:58:53 -0700 Subject: [PATCH 252/309] build: Fix image creation for F2FS-only configuration * Add f2fs dependencies regardless of whether we're building ext4 images or not. Change-Id: I886c52edaf56eb96fb931384440f33b00aa5c9ec --- core/Makefile | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/core/Makefile b/core/Makefile index a0b7ab09c..1fd44cfdb 100644 --- a/core/Makefile +++ b/core/Makefile @@ -729,6 +729,12 @@ INTERNAL_USERIMAGES_EXT_VARIANT := ext4 endif endif endif +ifeq ($(TARGET_USERIMAGES_USE_F2FS),true) +INTERNAL_USERIMAGES_USE_F2FS := true +ifeq ($(INTERNAL_USERIMAGES_EXT_VARIANT),) +INTERNAL_USERIMAGES_EXT_VARIANT := f2fs +endif +endif # These options tell the recovery updater/installer how to mount the partitions writebale. # =[|]... 
@@ -741,12 +747,12 @@ ifneq (true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED)) INTERNAL_USERIMAGES_SPARSE_EXT_FLAG := -s endif +INTERNAL_USERIMAGES_DEPS := ifeq ($(INTERNAL_USERIMAGES_USE_EXT),true) -INTERNAL_USERIMAGES_DEPS := $(SIMG2IMG) INTERNAL_USERIMAGES_DEPS += $(MKEXTUSERIMG) $(MAKE_EXT4FS) $(E2FSCK) -ifeq ($(TARGET_USERIMAGES_USE_F2FS),true) -INTERNAL_USERIMAGES_DEPS += $(MKF2FSUSERIMG) $(MAKE_F2FS) endif +ifeq ($(INTERNAL_USERIMAGES_USE_F2FS),true) +INTERNAL_USERIMAGES_DEPS += $(MKF2FSUSERIMG) $(MAKE_F2FS) endif ifeq ($(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),squashfs) @@ -757,6 +763,8 @@ ifeq ($(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),squashfs) INTERNAL_USERIMAGES_DEPS += $(MAKE_SQUASHFS) $(MKSQUASHFSUSERIMG) $(IMG2SIMG) endif +INTERNAL_USERIMAGES_DEPS += $(SIMG2IMG) + INTERNAL_USERIMAGES_BINARY_PATHS := $(sort $(dir $(INTERNAL_USERIMAGES_DEPS))) ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY)) From 12ba25391610ebf36f14cd6089b9621752954cf4 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Fri, 23 Oct 2015 20:15:03 +0300 Subject: [PATCH 253/309] Fix TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT mismerge Change-Id: I33164a44a40ef3b925c728d89d7d72aae3da2060 --- core/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/Makefile b/core/Makefile index 1fd44cfdb..d9fc5baf9 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1756,8 +1756,8 @@ ifeq ($(TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT),) $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ ./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root) else - $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ - $(hide) $(TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT) $(zip_root) $(zip_root) + $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \ + $(TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT) $(zip_root) $(zip_root) 
endif ifdef PRODUCT_DEFAULT_DEV_CERTIFICATE $(hide) build/tools/getb64key.py $(PRODUCT_DEFAULT_DEV_CERTIFICATE).x509.pem > $(zip_root)/META/releasekey.txt From 9efed5c39b3f51573e18c7e770c82f9c71aa72c2 Mon Sep 17 00:00:00 2001 From: Adnan Begovic Date: Wed, 15 Apr 2015 12:00:43 -0700 Subject: [PATCH 254/309] build/core: Define find-other-aidl-files. Useful when utilizing relative paths that mention external projects. Mimics find-other-java-files, etc. Change-Id: I3df67f4f35a931facbb1de76936936b092a42bb2 --- core/definitions.mk | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/definitions.mk b/core/definitions.mk index 631ef4d80..2db4e1b54 100644 --- a/core/definitions.mk +++ b/core/definitions.mk @@ -394,6 +394,10 @@ define find-other-java-files $(call all-java-files-under,$(1)) endef +define find-other-aidl-files + $(call find-subdir-files,$(1) -name "*.aidl" -and -not -name ".*") +endef + define find-other-html-files $(call all-html-files-under,$(1)) endef From f02454ce9563b9d4d1f75ded6f6686a8fda69e57 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Fri, 23 Oct 2015 20:23:21 +0300 Subject: [PATCH 255/309] Fix ccache mismerges * ccache config has moved to a separate makefile in M Change-Id: If1f426e647715f72c09f2339a61ac2a13121b477 --- core/ccache.mk | 10 ++++++- core/combo/select.mk | 62 -------------------------------------------- 2 files changed, 9 insertions(+), 63 deletions(-) diff --git a/core/ccache.mk b/core/ccache.mk index 34e5e1c38..d27f5a595 100644 --- a/core/ccache.mk +++ b/core/ccache.mk @@ -30,7 +30,9 @@ ifneq ($(USE_CCACHE),) # We don't really use system headers much so the rootdir is # fine; ensures these paths are relative for all Android trees # on a workstation. - export CCACHE_BASEDIR := / + ifeq ($(CCACHE_BASEDIR),) + export CCACHE_BASEDIR := $(ANDROID_BUILD_TOP) + endif # Workaround for ccache with clang. 
# See http://petereisentraut.blogspot.com/2011/09/ccache-and-clang-part-2.html @@ -52,6 +54,12 @@ ifneq ($(USE_CCACHE),) ifndef CXX_WRAPPER CXX_WRAPPER := $(ccache) endif + ifeq ($(ANDROID_CCACHE_DIR), $(CCACHE_DIR)) + ifneq ($(ANDROID_CCACHE_SIZE),) + ACCSIZE_RESULT := $(shell $(ccache) -M$(ANDROID_CCACHE_SIZE)) + endif + endif ccache = + ACCSIZE_RESULT = endif endif diff --git a/core/combo/select.mk b/core/combo/select.mk index 7c80c5fef..df12e7e38 100644 --- a/core/combo/select.mk +++ b/core/combo/select.mk @@ -47,65 +47,3 @@ $(combo_var_prefix)STATIC_LIB_SUFFIX := .a # Now include the combo for this specific target. include $(BUILD_COMBOS)/$(combo_target)$(combo_os_arch).mk - -ifneq ($(USE_CCACHE),) - # The default check uses size and modification time, causing false misses - # since the mtime depends when the repo was checked out - export CCACHE_COMPILERCHECK := content - - # See man page, optimizations to get more cache hits - # implies that __DATE__ and __TIME__ are not critical for functionality. - # Ignore include file modification time since it will depend on when - # the repo was checked out - export CCACHE_SLOPPINESS := time_macros,include_file_mtime,file_macro - - # Turn all preprocessor absolute paths into relative paths. - # Fixes absolute paths in preprocessed source due to use of -g. - # We don't really use system headers much so the rootdir is - # fine; ensures these paths are relative for all Android trees - # on a workstation. - ifeq ($(CCACHE_BASEDIR),) - export CCACHE_BASEDIR := $(ANDROID_BUILD_TOP) - endif - - # Workaround for ccache with clang. - # See http://petereisentraut.blogspot.com/2011/09/ccache-and-clang-part-2.html - export CCACHE_CPP2 := true - - CCACHE_HOST_TAG := $(HOST_PREBUILT_TAG) - # If we are cross-compiling Windows binaries on Linux - # then use the linux ccache binary instead. 
- ifeq ($(HOST_OS)-$(BUILD_OS),windows-linux) - CCACHE_HOST_TAG := linux-$(HOST_PREBUILT_ARCH) - endif - ccache := prebuilts/misc/$(CCACHE_HOST_TAG)/ccache/ccache - # Check that the executable is here. - ccache := $(strip $(wildcard $(ccache))) - ifdef ccache - ifndef CC_WRAPPER - CC_WRAPPER := $(ccache) - endif - ifndef CXX_WRAPPER - CXX_WRAPPER := $(ccache) - endif - ifeq ($(ANDROID_CCACHE_DIR), $(CCACHE_DIR)) - ifneq ($(ANDROID_CCACHE_SIZE),) - ACCSIZE_RESULT := $(shell $(ccache) -M$(ANDROID_CCACHE_SIZE)) - endif - endif - ccache = - ACCSIZE_RESULT = - endif -endif - -# The C/C++ compiler can be wrapped by setting the CC/CXX_WRAPPER vars. -ifdef CC_WRAPPER - ifneq ($(CC_WRAPPER),$(firstword $($(combo_var_prefix)CC))) - $(combo_var_prefix)CC := $(CC_WRAPPER) $($(combo_var_prefix)CC) - endif -endif -ifdef CXX_WRAPPER - ifneq ($(CXX_WRAPPER),$(firstword $($(combo_var_prefix)CXX))) - $(combo_var_prefix)CXX := $(CXX_WRAPPER) $($(combo_var_prefix)CXX) - endif -endif From 89aa64a63b9636a878d6598799811d9ddc5f013e Mon Sep 17 00:00:00 2001 From: arter97 Date: Tue, 9 Sep 2014 11:44:22 +0900 Subject: [PATCH 256/309] Apply correct, optimized mfpu compiler flag for ARMv7-A Cortex CPUs For those ARMv7-A Cortex CPUs that can handle VFPv4 floating point, We can set "-mfpu=neon-vfpv4" instead of generic "-mfpu=neon" to gain extra performance improvements. 
References : - GCC : https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html - Cortex A15 : http://www.arm.com/products/processors/cortex-a/cortex-a15.php - Cortex A9 : http://www.arm.com/products/processors/cortex-a/cortex-a9.php - Cortex A8 : http://www.arm.com/products/processors/cortex-a/cortex-a8.php - Cortex A7 : http://www.arm.com/products/processors/cortex-a/cortex-a7.php Change-Id: I91893789ed8edabf3767e1782e494b81158332bb Signed-off-by: Park Ju Hyung Signed-off-by: Maxime Poulain --- core/combo/arch/arm/armv7-a-neon.mk | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/core/combo/arch/arm/armv7-a-neon.mk b/core/combo/arch/arm/armv7-a-neon.mk index 2822624c7..9154f71a6 100644 --- a/core/combo/arch/arm/armv7-a-neon.mk +++ b/core/combo/arch/arm/armv7-a-neon.mk @@ -11,7 +11,7 @@ ifneq (,$(filter cortex-a15 krait denver,$(TARGET_$(combo_2nd_arch_prefix)CPU_VA # hardware divide operations are generated. This should be removed and a # krait CPU variant added to GCC. For clang we specify -mcpu for krait in # core/clang/arm.mk. - arch_variant_cflags := -mcpu=cortex-a15 + arch_variant_cflags := -mcpu=cortex-a15 -mfpu=neon-vfpv4 # Fake an ARM compiler flag as these processors support LPAE which GCC/clang # don't advertise. 
@@ -20,19 +20,19 @@ ifneq (,$(filter cortex-a15 krait denver,$(TARGET_$(combo_2nd_arch_prefix)CPU_VA -Wl,--no-fix-cortex-a8 else ifeq ($(strip $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)),cortex-a9) - arch_variant_cflags := -mcpu=cortex-a9 + arch_variant_cflags := -mcpu=cortex-a9 -mfpu=neon else ifneq (,$(filter cortex-a8 scorpion,$(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT))) - arch_variant_cflags := -mcpu=cortex-a8 + arch_variant_cflags := -mcpu=cortex-a8 -mfpu=neon arch_variant_ldflags := \ -Wl,--fix-cortex-a8 else ifeq ($(strip $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)),cortex-a7) - arch_variant_cflags := -mcpu=cortex-a7 + arch_variant_cflags := -mcpu=cortex-a7 -mfpu=neon-vfpv4 arch_variant_ldflags := \ -Wl,--no-fix-cortex-a8 else - arch_variant_cflags := -march=armv7-a + arch_variant_cflags := -march=armv7-a -mfpu=neon # Generic ARM might be a Cortex A8 -- better safe than sorry arch_variant_ldflags := \ -Wl,--fix-cortex-a8 @@ -42,5 +42,4 @@ endif endif arch_variant_cflags += \ - -mfloat-abi=softfp \ - -mfpu=neon + -mfloat-abi=softfp From 426f07f69f2f7fd6eedaf75424b0bc92f82ce7d5 Mon Sep 17 00:00:00 2001 From: Matt Mower Date: Tue, 20 May 2014 02:52:23 -0500 Subject: [PATCH 257/309] envsetup: dopush: set file permissions on files pushed to /data too * If pushing to /data, transfer old file owner, group, permissions to pushed files, then restorecon Signed-off-by: Chirayu Desai Change-Id: Ibf2945adee457a680a288f2bdfba3d2e29ba25a1 --- envsetup.sh | 44 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 859aa5fae..0d4133880 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1892,11 +1892,29 @@ function dopush() # Copy: LOC="$LOC $(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep '^Copy: ' | cut -d ':' -f 2)" + # If any files are going to /data, push an octal file permissions reader to device + if [ -n "$(echo $LOC | egrep '(^|\s)/data')" ]; then + 
CHKPERM="/data/local/tmp/chkfileperm.sh" +( +cat <<'EOF' +#!/system/xbin/sh +FILE=$@ +if [ -e $FILE ]; then + ls -l $FILE | awk '{k=0;for(i=0;i<=8;i++)k+=((substr($1,i+2,1)~/[rwx]/)*2^(8-i));if(k)printf("%0o ",k);print}' | cut -d ' ' -f1 +fi +EOF +) > $OUT/.chkfileperm.sh + echo "Pushing file permissions checker to device" + adb push $OUT/.chkfileperm.sh $CHKPERM + adb shell chmod 755 $CHKPERM + rm -f $OUT/.chkfileperm.sh + fi + stop_n_start=false for FILE in $LOC; do - # Make sure file is in $OUT/system + # Make sure file is in $OUT/system or $OUT/data case $FILE in - $OUT/system/*) + $OUT/system/*|$OUT/data/*) # Get target file name (i.e. /system/bin/adb) TARGET=$(echo $FILE | sed "s#$OUT##") ;; @@ -1904,6 +1922,25 @@ function dopush() esac case $TARGET in + /data/*) + # fs_config only sets permissions and se labels for files pushed to /system + if [ -n "$CHKPERM" ]; then + OLDPERM=$(adb shell $CHKPERM $TARGET) + OLDPERM=$(echo $OLDPERM | tr -d '\r' | tr -d '\n') + OLDOWN=$(adb shell ls -al $TARGET | awk '{print $2}') + OLDGRP=$(adb shell ls -al $TARGET | awk '{print $3}') + fi + echo "Pushing: $TARGET" + adb push $FILE $TARGET + if [ -n "$OLDPERM" ]; then + echo "Setting file permissions: $OLDPERM, $OLDOWN":"$OLDGRP" + adb shell chown "$OLDOWN":"$OLDGRP" $TARGET + adb shell chmod "$OLDPERM" $TARGET + else + echo "$TARGET did not exist previously, you should set file permissions manually" + fi + adb shell restorecon "$TARGET" + ;; /system/priv-app/SystemUI/SystemUI.apk|/system/framework/*) # Only need to stop services once if ! $stop_n_start; then @@ -1919,6 +1956,9 @@ function dopush() ;; esac done + if [ -n "$CHKPERM" ]; then + adb shell rm $CHKPERM + fi if $stop_n_start; then adb shell start fi From b0a39641275486ea1a80799315d1d45376bb2290 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 21 Aug 2013 17:05:26 -0700 Subject: [PATCH 258/309] generate_extra_images: Look for 3.10 dtbs The dtbs have changed location between 3.4 and 3.10. 
Look for the new location first and fallback to the 3.4 location if they're missing. Change-Id: I7aada8dbcf01ea6f62b3235b452c9329cd69e5e8 --- core/generate_extra_images.mk | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/generate_extra_images.mk b/core/generate_extra_images.mk index c25366856..9c41028a2 100644 --- a/core/generate_extra_images.mk +++ b/core/generate_extra_images.mk @@ -99,9 +99,12 @@ DTBTOOL := $(HOST_OUT_EXECUTABLES)/$(DTBTOOL_NAME)$(HOST_EXECUTABLE_SUFFIX) INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img +possible_dtb_dirs = $(KERNEL_OUT)/arch/arm/boot/dts/ $(KERNEL_OUT)/arch/arm/boot/ +dtb_dir = $(firstword $(wildcard $(possible_dtb_dirs))) + define build-dtimage-target $(call pretty,"Target dt image: $(INSTALLED_DTIMAGE_TARGET)") - $(hide) $(DTBTOOL) -o $@ -s $(BOARD_KERNEL_PAGESIZE) -p $(KERNEL_OUT)/scripts/dtc/ $(KERNEL_OUT)/arch/arm/boot/ + $(hide) $(DTBTOOL) -o $@ -s $(BOARD_KERNEL_PAGESIZE) -p $(KERNEL_OUT)/scripts/dtc/ $(dtb_dir) $(hide) chmod a+r $@ endef From e90475276e93cbbc0b8b19b854605e17351d927d Mon Sep 17 00:00:00 2001 From: David Ng Date: Wed, 11 Dec 2013 12:29:39 -0800 Subject: [PATCH 259/309] build: Generalize kernel DTB path Use $TARGET_KERNEL_ARCH to specify the architecture-dependent path location of the DTB files. 
Change-Id: I302f407d987e1b33acb0e47b284a1cb793747691 --- core/generate_extra_images.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/generate_extra_images.mk b/core/generate_extra_images.mk index 9c41028a2..b796d3f43 100644 --- a/core/generate_extra_images.mk +++ b/core/generate_extra_images.mk @@ -99,7 +99,7 @@ DTBTOOL := $(HOST_OUT_EXECUTABLES)/$(DTBTOOL_NAME)$(HOST_EXECUTABLE_SUFFIX) INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img -possible_dtb_dirs = $(KERNEL_OUT)/arch/arm/boot/dts/ $(KERNEL_OUT)/arch/arm/boot/ +possible_dtb_dirs = $(KERNEL_OUT)/arch/$(TARGET_KERNEL_ARCH)/boot/dts/ $(KERNEL_OUT)/arch/arm/boot/ dtb_dir = $(firstword $(wildcard $(possible_dtb_dirs))) define build-dtimage-target From 42649d743ba2a1c5de85767b634a4c6be5be492b Mon Sep 17 00:00:00 2001 From: Khalid Zubair Date: Tue, 6 Oct 2015 11:00:55 -0700 Subject: [PATCH 260/309] mka: allow mka to be run from anywhere in the tree The Android build system expects to be launched from the top of the tree. Invoking mka at any other level often has unintended consequence (e.g. targets the host ARCH because ARCH was not set). The various build shortcuts like m and mm invoke Make from the top of the tree. mka now does the same. 
Change-Id: Id956cf04cbaf1d12d12549ca54e32b9c2ce1fc29 --- envsetup.sh | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/envsetup.sh b/envsetup.sh index 0d4133880..5bbe0b34c 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1689,14 +1689,20 @@ function makerecipe() { } function mka() { - case `uname -s` in - Darwin) - make -j `sysctl hw.ncpu|cut -d" " -f2` "$@" - ;; - *) - mk_timer schedtool -B -n 1 -e ionice -n 1 make -j$(cat /proc/cpuinfo | grep "^processor" | wc -l) "$@" - ;; - esac + local T=$(gettop) + if [ "$T" ]; then + case `uname -s` in + Darwin) + make -C $T -j `sysctl hw.ncpu|cut -d" " -f2` "$@" + ;; + *) + mk_timer schedtool -B -n 1 -e ionice -n 1 make -C $T -j$(cat /proc/cpuinfo | grep "^processor" | wc -l) "$@" + ;; + esac + + else + echo "Couldn't locate the top of the tree. Try setting TOP." + fi } function cmka() { From 4265714b993775fb2f96d9d95c164c2449118143 Mon Sep 17 00:00:00 2001 From: Matt Wagantall Date: Thu, 15 Oct 2015 13:54:19 -0700 Subject: [PATCH 261/309] kernel: use merge-config.sh for integrating KERNEL_ADDITIONAL_CONFIG Using 'cat' to combine defconfig fragments is not, in general, safe. Overriding options is not correctly handled, and it's possible to produce illegal configurations which violate Kconfig dependency constraints. Instead, use the merge-config.sh script from the kernel tree which more intelligently combines defconfig fragments. An 'alldefconfig' make target is also introduced, as merge-config.sh depends on it. Change-Id: I91a8c5e4bcf7072a3f95651dffe17380174134e4 --- core/tasks/kernel.mk | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 293b86703..ce05153a7 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -226,8 +226,8 @@ $(KERNEL_CONFIG): $(KERNEL_OUT) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi $(hide) if [ ! 
-z "$(KERNEL_ADDITIONAL_CONFIG)" ]; then \ echo "Using additional config '$(KERNEL_ADDITIONAL_CONFIG)'"; \ - cat $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG) >> $(KERNEL_OUT)/.config; \ - $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi + $(KERNEL_SRC)/scripts/kconfig/merge_config.sh -m -O $(KERNEL_OUT) $(KERNEL_OUT)/.config $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG); \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) KCONFIG_ALLCONFIG=$(KERNEL_OUT)/.config alldefconfig; fi TARGET_KERNEL_BINARIES: $(KERNEL_OUT) $(KERNEL_CONFIG) $(KERNEL_HEADERS_INSTALL) $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(TARGET_PREBUILT_INT_KERNEL_TYPE) @@ -258,8 +258,8 @@ $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi $(hide) if [ ! 
-z "$(KERNEL_ADDITIONAL_CONFIG)" ]; then \ echo "Using additional config '$(KERNEL_ADDITIONAL_CONFIG)'"; \ - cat $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG) >> $(KERNEL_OUT)/.config; \ - $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi + $(KERNEL_SRC)/scripts/kconfig/merge_config.sh -m -O $(KERNEL_OUT) $(KERNEL_OUT)/.config $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG); \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) KCONFIG_ALLCONFIG=$(KERNEL_OUT)/.config alldefconfig; fi kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) tags @@ -271,6 +271,10 @@ kernelconfig: $(KERNEL_OUT) $(KERNEL_CONFIG) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) savedefconfig cp $(KERNEL_OUT)/defconfig $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG) +alldefconfig: $(KERNEL_OUT) + env KCONFIG_NOTIMESTAMP=true \ + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) alldefconfig + endif # FULL_KERNEL_BUILD ## Install it From c3636d1a40930c540b11a0bde18f8fba4ce31427 Mon Sep 17 00:00:00 2001 From: Matt Wagantall Date: Fri, 16 Oct 2015 16:32:33 -0700 Subject: [PATCH 262/309] kernel: Don't update defconfig with KERNEL_ADDITIONAL_CONFIG options If 'make kernelconfig' is run in an environment where KERNEL_ADDITIONAL_CONFIG is set, the options in the additional defconfig will be saved back to the defconfig when savedefconfig is performed. While not really a bug, this increases the likelihood of someone accidentally committing these changes or getting confused by unexpected deltas added to their defconfigs. Fix this by skipping the merge-config.sh step for the 'kernelconfig' make target. As a side-effect, options in KERNEL_ADDITIONAL_CONFIG will also not be displayed in menuconfig. 
This is not easily avoidable, and arguably not wrong anyway. Change-Id: I920be0338bb8afc87f6061c55aafb27ba658974d --- core/tasks/kernel.mk | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index ce05153a7..d43e8e10f 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -264,7 +264,11 @@ $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) tags -kernelconfig: $(KERNEL_OUT) $(KERNEL_CONFIG) +kernelconfig: $(KERNEL_OUT) + $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG) + $(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \ + echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ + echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; fi env KCONFIG_NOTIMESTAMP=true \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) menuconfig env KCONFIG_NOTIMESTAMP=true \ From b37d8f675c0e53a14ac28bdc81904aedaea27d1c Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Wed, 28 Oct 2015 11:04:01 -0700 Subject: [PATCH 263/309] build: Introduce ainfo, aerror functions envsetup.sh calls make directly to set some build vars. Any "bare" info or error messages interfere with the logic, so they must be guarded with checks for CALLED_FROM_SETUP. This is tedious and error prone, so provide an alternative that handles this logic automagically. Change-Id: I9f9a355a6ae58f2b173cffce796e37be1e68238c --- core/config.mk | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/core/config.mk b/core/config.mk index 766be1dcc..a44652468 100644 --- a/core/config.mk +++ b/core/config.mk @@ -3,6 +3,24 @@ # current configuration and platform, which # are not specific to what is being built. 
+# These may be used to trace makefile issues without interfering with +# envsetup.sh. Usage: +# $(call ainfo,some info message) +# $(call aerror,some error message) +ifdef CALLED_FROM_SETUP +define ainfo +endef +define aerror +endef +else +define ainfo +$(info $(1)) +endef +define aerror +$(error $(1)) +endef +endif + # Only use ANDROID_BUILD_SHELL to wrap around bash. # DO NOT use other shells such as zsh. ifdef ANDROID_BUILD_SHELL From 18db3e746766008ee20bbbefdd3e5e70ac26d0ff Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Thu, 29 Oct 2015 17:57:42 -0700 Subject: [PATCH 264/309] build: Adjust for device-specific HAL paths * QCOM variant HALs are pretty much deprecated, long live device-specific HALs! Change-Id: I636d1e851519aba31ea3744b369aea06d6db6f68 --- core/qcom_target.mk | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index b9314d3e3..1b69715bd 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -1,9 +1,6 @@ # Target-specific configuration # Populate the qcom hardware variants in the project pathmap. 
-define qcom-set-path-variant -$(call project-set-path-variant,qcom-$(2),TARGET_QCOM_$(1)_VARIANT,hardware/qcom/$(2)) -endef define ril-set-path-variant $(call project-set-path-variant,ril,TARGET_RIL_VARIANT,hardware/$(1)) endef @@ -13,11 +10,11 @@ endef define bt-vendor-set-path-variant $(call project-set-path-variant,bt-vendor,TARGET_BT_VENDOR_VARIANT,hardware/qcom/$(1)) endef -define gps-hal-set-path-variant -$(call project-set-path-variant,gps-hal,TARGET_GPS_HAL_PATH,$(1)) -endef -define loc-api-set-path-variant -$(call project-set-path-variant,loc-api,TARGET_LOC_API_PATH,$(1)) + +# Set device-specific HALs into project pathmap +define set-device-specific-path +$(call project-set-path,qcom-$(2),$(strip $(if $(USE_DEVICE_SPECIFIC_$(1)), \ + $(TARGET_DEVICE_DIR)/$(2), $(3)))) endef ifeq ($(BOARD_USES_QCOM_HARDWARE),true) @@ -65,30 +62,31 @@ ifeq ($(BOARD_USES_QCOM_HARDWARE),true) endif $(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(QCOM_HARDWARE_VARIANT)) -ifeq ($(USE_DEVICE_SPECIFIC_CAMERA),true) -$(call project-set-path,qcom-camera,$(TARGET_DEVICE_DIR)/camera) -else -$(call qcom-set-path-variant,CAMERA,camera) -endif $(call project-set-path,qcom-display,hardware/qcom/display-caf/$(QCOM_HARDWARE_VARIANT)) -$(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media-caf/$(QCOM_HARDWARE_VARIANT)) -$(call qcom-set-path-variant,SENSORS,sensors) + +$(call set-device-specific-path,CAMERA,camera,hardware/qcom/camera) +$(call set-device-specific-path,GPS,gps,hardware/qcom/gps) +$(call set-device-specific-path,SENSORS,sensors,hardware/qcom/sensors) +$(call set-device-specific-path,LOC_API,loc-api,vendor/qcom/opensource/location) + $(call ril-set-path-variant,ril) $(call wlan-set-path-variant,wlan-caf) $(call bt-vendor-set-path-variant,bt-caf) -$(call loc-api-set-path-variant,vendor/qcom/opensource/location) -$(call gps-hal-set-path-variant,hardware/qcom/gps) + else + $(call 
project-set-path,qcom-audio,hardware/qcom/audio/default) -$(call qcom-set-path-variant,CAMERA,camera) $(call project-set-path,qcom-display,hardware/qcom/display/$(TARGET_BOARD_PLATFORM)) -$(call qcom-set-path-variant,GPS,gps) $(call project-set-path,qcom-media,hardware/qcom/media/default) -$(call qcom-set-path-variant,SENSORS,sensors) + +$(call project-set-path,CAMERA,hardware/qcom/camera) +$(call project-set-path,GPS,hardware/qcom/gps) +$(call project-set-path,SENSORS,hardware/qcom/sensors) +$(call project-set-path,LOC_API,vendor/qcom/opensource/location) + $(call ril-set-path-variant,ril) $(call wlan-set-path-variant,wlan) $(call bt-vendor-set-path-variant,bt) -$(call loc-api-set-path-variant,vendor/qcom/opensource/location) -$(call gps-hal-set-path-variant,hardware/qcom/gps) + endif From b910150e5ae426cadd6452df6a6055853b78acea Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Thu, 29 Oct 2015 18:03:05 +0200 Subject: [PATCH 265/309] generate_extra_images: Allow supplying arguments to dtbtool * Also add a message to indicate when dt.img is generated Change-Id: I670cc8aa571269d1dc1085e51b063fb890dc05f2 --- core/generate_extra_images.mk | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/generate_extra_images.mk b/core/generate_extra_images.mk index b796d3f43..041253ce4 100644 --- a/core/generate_extra_images.mk +++ b/core/generate_extra_images.mk @@ -104,12 +104,13 @@ dtb_dir = $(firstword $(wildcard $(possible_dtb_dirs))) define build-dtimage-target $(call pretty,"Target dt image: $(INSTALLED_DTIMAGE_TARGET)") - $(hide) $(DTBTOOL) -o $@ -s $(BOARD_KERNEL_PAGESIZE) -p $(KERNEL_OUT)/scripts/dtc/ $(dtb_dir) + $(hide) $(DTBTOOL) $(BOARD_DTBTOOL_ARGS) -o $@ -s $(BOARD_KERNEL_PAGESIZE) -p $(KERNEL_OUT)/scripts/dtc/ $(dtb_dir) $(hide) chmod a+r $@ endef $(INSTALLED_DTIMAGE_TARGET): $(DTBTOOL) $(INSTALLED_KERNEL_TARGET) $(build-dtimage-target) + @echo -e ${CL_CYN}"Made DT image: $@"${CL_RST} ALL_DEFAULT_INSTALLED_MODULES += 
$(INSTALLED_DTIMAGE_TARGET) ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_DTIMAGE_TARGET) From 53498211ffeb61e4d14fcb6949a2f7f8ef511c86 Mon Sep 17 00:00:00 2001 From: Scott Mertz Date: Fri, 30 Oct 2015 12:48:06 -0700 Subject: [PATCH 266/309] build: skip checking boot jars by default Currently, builds are failing this because of non-whitelisted classes in the boot jar. Lets disable this check until we can refactor the code. Change-Id: I1d4b231d43702441a2d5ee9e3c058b9b2a88a38c --- core/tasks/boot_jars_package_check.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/tasks/boot_jars_package_check.mk b/core/tasks/boot_jars_package_check.mk index 188c26740..28f2b826a 100644 --- a/core/tasks/boot_jars_package_check.mk +++ b/core/tasks/boot_jars_package_check.mk @@ -16,6 +16,7 @@ # Rules to check if classes in the boot jars are from the whitelisted packages. # +ifneq ($(SKIP_BOOT_JARS_CHECK),) ifneq ($(SKIP_BOOT_JARS_CHECK),true) ifneq ($(TARGET_BUILD_PDK),true) ifdef PRODUCT_BOOT_JARS @@ -44,3 +45,4 @@ droidcore : check-boot-jars endif # PRODUCT_BOOT_JARS endif # TARGET_BUILD_PDK not true endif # SKIP_BOOT_JARS_CHECK not true +endif # SKIP_BOOT_JARS_CHECK not defined From cc2bc8565bc9f716e127a15fb5943b82d72da806 Mon Sep 17 00:00:00 2001 From: Pirama Arumuga Nainar Date: Thu, 9 Jul 2015 12:37:19 -0700 Subject: [PATCH 267/309] Add -mfpu=neon-vfpv4 if cpu is krait Upstream clang r239152 (http://reviews.llvm.org/D10239) caused a pretty significant change in behavior. Passing an FPU feature via -mfpu disables any feature not supported by those flags (in addition to the old behavior of enabling features supported by the flags). For e.g., -mfpu=neon used to just pass +neon, +vfp3 as target features to the backend. Now, -mfpu=neon also passes -vfp4, -fp16, -fp-armv8. The backend has always disabled implied feature bits if a feature is disabled. 
Upon seeing the target feature -vfp4, it will disable any processor/feature that implies vfp4, including the bit that the processor is a Krait. Since Krait has both Neon and vfp4, it is safe to pass '-mfpu=neon-vfpv4'. Change-Id: Ibbb992e80b8cbc8dc36d5df556885d0912baea22 --- core/clang/arm.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/clang/arm.mk b/core/clang/arm.mk index bf31f51bf..6b3d7c17c 100644 --- a/core/clang/arm.mk +++ b/core/clang/arm.mk @@ -7,7 +7,7 @@ CLANG_CONFIG_arm_EXTRA_CFLAGS := ifneq (,$(filter krait,$(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT))) # Android's clang support's krait as a CPU whereas GCC doesn't. Specify # -mcpu here rather than the more normal core/combo/arch/arm/armv7-a-neon.mk. - CLANG_CONFIG_arm_EXTRA_CFLAGS += -mcpu=krait + CLANG_CONFIG_arm_EXTRA_CFLAGS += -mcpu=krait -mfpu=neon-vfpv4 endif ifeq ($(HOST_OS),darwin) From d749f6c17b11c4cec5ec39d61c65f8a15c3ca481 Mon Sep 17 00:00:00 2001 From: David Ng Date: Fri, 27 Jul 2012 18:39:48 -0700 Subject: [PATCH 268/309] build: Add support for device tree in boot.img Add support for optional device tree image (dt.img) to boot and recovery images. Some devices use kernel device tree and this adds the device tree image as a section within the boot/recovery images. Change-Id: I91431ef2f4b86485895678916e39a8572be878eb Build: add DT image variable to Makefile DT image variable is currently present in generate_extra_images.mk.This file is moved to build/tasks to support persist image generation during parallel make. As build/tasks is called at the end of Makefile, DT image variable is not available for other images generation like boot and recovery. Adding this variable in Makefile ensures the variable is defined before usage Change-Id: I21f675d8ce648dc1cf1f4f3aede33278300e08c9 CRs-fixed: 548299 Fix the extra dt.img compilation issue. 
Add support for optional device tree image (dt.img) for device that doesnt have TARGET_BOOTIMAGE_USE_EXT2 Change-Id: I6e07b3ca6d049a8ebdad7ea304b4f39e7c846151 --- core/Makefile | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/core/Makefile b/core/Makefile index d9fc5baf9..7756d0ac6 100644 --- a/core/Makefile +++ b/core/Makefile @@ -530,6 +530,13 @@ ifdef BOARD_KERNEL_PAGESIZE INTERNAL_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE) endif +INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img + +ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) + INTERNAL_BOOTIMAGE_ARGS += --dt $(INSTALLED_DTIMAGE_TARGET) + BOOTIMAGE_EXTRA_DEPS := $(INSTALLED_DTIMAGE_TARGET) +endif + INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img ifeq ($(TARGET_BOOTIMAGE_USE_EXT2),true) @@ -555,7 +562,7 @@ else ifndef BOARD_CUSTOM_BOOTIMG_MK ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)) # PRODUCT_SUPPORTS_BOOT_SIGNER != true -$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) +$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) $(BOOTIMAGE_EXTRA_DEPS) $(call pretty,"Target boot image: $@") $(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@.unsigned $(VBOOT_SIGNER) $(FUTILITY) $@.unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $@.keyblock $@ @@ -570,7 +577,7 @@ bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER) else # PRODUCT_SUPPORTS_VBOOT != true -$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) +$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOTIMAGE_EXTRA_DEPS) $(call pretty,"Target boot image: $@") $(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@ $(hide) $(call 
assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE)) @@ -912,6 +919,10 @@ BOARD_KERNEL_PAGESIZE := $(strip $(BOARD_KERNEL_PAGESIZE)) ifdef BOARD_KERNEL_PAGESIZE INTERNAL_RECOVERYIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE) endif +ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) + INTERNAL_RECOVERYIMAGE_ARGS += --dt $(INSTALLED_DTIMAGE_TARGET) + RECOVERYIMAGE_EXTRA_DEPS := $(INSTALLED_DTIMAGE_TARGET) +endif # Keys authorized to sign OTA packages this build will accept. The # build always uses dev-keys for this; release packaging tools will @@ -992,8 +1003,9 @@ $(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) \ $(RECOVERY_INSTALL_OTA_KEYS) ifndef BOARD_CUSTOM_BOOTIMG_MK -$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTIMG) $(recovery_ramdisk) $(recovery_kernel) - $(call build-recoveryimage-target, $@) +$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTIMG) $(recovery_ramdisk) $(recovery_kernel) \ + $(RECOVERYIMAGE_EXTRA_DEPS) + $(call build-recoveryimage-target, $@) endif # BOARD_CUSTOM_BOOTIMG_MK recovery_patch_path := $(call intermediates-dir-for,PACKAGING,recovery_patch) From 8945bc9a56fad9a7b1910d4d628680d67cce2fea Mon Sep 17 00:00:00 2001 From: Luca Stefani Date: Mon, 2 Nov 2015 05:33:10 -0800 Subject: [PATCH 269/309] build: fix dopush with zsh Change-Id: I4a9e71cfa820583c4d48363815f1ced05d972b54 --- envsetup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envsetup.sh b/envsetup.sh index 5bbe0b34c..cceb7045d 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -1917,7 +1917,7 @@ EOF fi stop_n_start=false - for FILE in $LOC; do + for FILE in $(echo $LOC | tr " " "\n"); do # Make sure file is in $OUT/system or $OUT/data case $FILE in $OUT/system/*|$OUT/data/*) From 784c84e02404add05d716491d2c3cdb06e8ccd5b Mon Sep 17 00:00:00 2001 From: Luca Stefani Date: Sat, 31 Oct 2015 04:50:20 -0700 Subject: [PATCH 270/309] build: create dt.img task * Remove dt.img creation from generate_extra_images Change-Id: I676b588cd9acb671771acda638abefee036f0519 --- 
core/generate_extra_images.mk | 37 ----------------------------------- core/tasks/dt_image.mk | 34 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 37 deletions(-) create mode 100644 core/tasks/dt_image.mk diff --git a/core/generate_extra_images.mk b/core/generate_extra_images.mk index 041253ce4..319fb1fc4 100644 --- a/core/generate_extra_images.mk +++ b/core/generate_extra_images.mk @@ -80,43 +80,6 @@ $(INSTALLED_PERSISTIMAGE_TARGET): $(MKEXTUSERIMG) $(MAKE_EXT4FS) $(INTERNAL_PERS ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_PERSISTIMAGE_TARGET) ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_PERSISTIMAGE_TARGET) - -#---------------------------------------------------------------------- -# Generate device tree image (dt.img) -#---------------------------------------------------------------------- -ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) -ifeq ($(strip $(BUILD_TINY_ANDROID)),true) -include device/qcom/common/dtbtool/Android.mk -endif - -ifeq ($(strip $(TARGET_CUSTOM_DTBTOOL)),) -DTBTOOL_NAME := dtbToolCM -else -DTBTOOL_NAME := $(TARGET_CUSTOM_DTBTOOL) -endif - -DTBTOOL := $(HOST_OUT_EXECUTABLES)/$(DTBTOOL_NAME)$(HOST_EXECUTABLE_SUFFIX) - -INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img - -possible_dtb_dirs = $(KERNEL_OUT)/arch/$(TARGET_KERNEL_ARCH)/boot/dts/ $(KERNEL_OUT)/arch/arm/boot/ -dtb_dir = $(firstword $(wildcard $(possible_dtb_dirs))) - -define build-dtimage-target - $(call pretty,"Target dt image: $(INSTALLED_DTIMAGE_TARGET)") - $(hide) $(DTBTOOL) $(BOARD_DTBTOOL_ARGS) -o $@ -s $(BOARD_KERNEL_PAGESIZE) -p $(KERNEL_OUT)/scripts/dtc/ $(dtb_dir) - $(hide) chmod a+r $@ -endef - -$(INSTALLED_DTIMAGE_TARGET): $(DTBTOOL) $(INSTALLED_KERNEL_TARGET) - $(build-dtimage-target) - @echo -e ${CL_CYN}"Made DT image: $@"${CL_RST} - -ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DTIMAGE_TARGET) -ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_DTIMAGE_TARGET) -endif - - 
#---------------------------------------------------------------------- # Generate 1GB userdata image for 8930 #---------------------------------------------------------------------- diff --git a/core/tasks/dt_image.mk b/core/tasks/dt_image.mk new file mode 100644 index 000000000..b1030467c --- /dev/null +++ b/core/tasks/dt_image.mk @@ -0,0 +1,34 @@ +#---------------------------------------------------------------------- +# Generate device tree image (dt.img) +#---------------------------------------------------------------------- +ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) +ifeq ($(strip $(BUILD_TINY_ANDROID)),true) +include device/qcom/common/dtbtool/Android.mk +endif + +ifeq ($(strip $(TARGET_CUSTOM_DTBTOOL)),) +DTBTOOL_NAME := dtbToolCM +else +DTBTOOL_NAME := $(TARGET_CUSTOM_DTBTOOL) +endif + +DTBTOOL := $(HOST_OUT_EXECUTABLES)/$(DTBTOOL_NAME)$(HOST_EXECUTABLE_SUFFIX) + +INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img + +possible_dtb_dirs = $(KERNEL_OUT)/arch/$(TARGET_KERNEL_ARCH)/boot/dts/ $(KERNEL_OUT)/arch/arm/boot/ +dtb_dir = $(firstword $(wildcard $(possible_dtb_dirs))) + +define build-dtimage-target + $(call pretty,"Target dt image: $@") + $(hide) $(DTBTOOL) $(BOARD_DTBTOOL_ARGS) -o $@ -s $(BOARD_KERNEL_PAGESIZE) -p $(KERNEL_OUT)/scripts/dtc/ $(dtb_dir) + $(hide) chmod a+r $@ +endef + +$(INSTALLED_DTIMAGE_TARGET): $(DTBTOOL) $(INSTALLED_KERNEL_TARGET) + $(build-dtimage-target) + @echo -e ${CL_CYN}"Made DT image: $@"${CL_RST} + +ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DTIMAGE_TARGET) +ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_DTIMAGE_TARGET) +endif From 81c34c410f6a3945299412ea5fd5293a79e98e0f Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Mon, 2 Nov 2015 02:07:40 +0200 Subject: [PATCH 271/309] Revert "Add auditd" * Deprecated This reverts commit 40cb2f95b09f1c81bde69d861eed7ced7c63f83c. 
Change-Id: I5aff2ee3f8f518e307b35acbeb88dab727630318 --- target/product/embedded.mk | 1 - 1 file changed, 1 deletion(-) diff --git a/target/product/embedded.mk b/target/product/embedded.mk index a92cd34e3..25a8975c3 100644 --- a/target/product/embedded.mk +++ b/target/product/embedded.mk @@ -74,7 +74,6 @@ PRODUCT_PACKAGES += \ # SELinux packages PRODUCT_PACKAGES += \ - auditd \ sepolicy \ file_contexts \ seapp_contexts \ From 57dbb2485d8cfa8f7640a9de053bd5aaf5a65fa8 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Mon, 2 Nov 2015 02:10:26 +0200 Subject: [PATCH 272/309] full_base: Fix mismerge * Remove property that was mismerged in b3b47f8ecdc8c2657b34d89c3a8f40796a16f1f2 Change-Id: I18be52cc4a167f52150e56859f5139bda09d34e4 --- target/product/full_base.mk | 4 ---- 1 file changed, 4 deletions(-) diff --git a/target/product/full_base.mk b/target/product/full_base.mk index cd6e00b19..bac3e030f 100644 --- a/target/product/full_base.mk +++ b/target/product/full_base.mk @@ -23,10 +23,6 @@ PRODUCT_PACKAGES := \ libfwdlockengine \ WAPPushManager -# Additional settings used in all AOSP builds -PRODUCT_PROPERTY_OVERRIDES := \ - ro.com.android.dateformat=MM-dd-yyyy - # Put en_US first in the list, so make it default. 
PRODUCT_LOCALES := en_US From a9a11da48366abd21d98fe6a211a48fb7287a285 Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Thu, 5 Nov 2015 00:55:01 -0500 Subject: [PATCH 273/309] Conditionally create symlink /system/vendor if needed We don't need to build a vendor image in CM, just create the symlink for Nexus devices that use a vendor partition To use add BOARD_NEEDS_VENDORIMAGE_SYMLINK := true to BoardConfig.mk Change-Id: Id39ee89007af39346f0887796ed3cbad81765c46 --- core/Makefile | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/core/Makefile b/core/Makefile index 7756d0ac6..2f71b5bcd 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1155,6 +1155,18 @@ define create-system-vendor-symlink endef endif +# Only Create symlink /system/vendor to /vendor if necessary. +ifdef BOARD_NEEDS_VENDORIMAGE_SYMLINK +define create-system-vendor-symlink +$(hide) if [ -d $(TARGET_OUT)/vendor ] && [ ! -h $(TARGET_OUT)/vendor ]; then \ + echo 'Non-symlink $(TARGET_OUT)/vendor detected!' 1>&2; \ + echo 'You cannot install files to $(TARGET_OUT)/vendor while building a separate vendor.img!' 
1>&2; \ + exit 1; \ +fi +$(hide) ln -sf /vendor $(TARGET_OUT)/vendor +endef +endif + # $(1): output file define build-systemimage-target @echo "Target system fs image: $(1)" From 1d43182c8c50083d52cd37f65ff31cf1217fb0aa Mon Sep 17 00:00:00 2001 From: Tom Marshall Date: Fri, 30 Oct 2015 06:08:48 -0700 Subject: [PATCH 274/309] build: Disable relocation packing on recovery and utility executables Change-Id: I2a543537c114cfc1a6d9746fe7c0bc00338ad32d --- core/dynamic_binary.mk | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core/dynamic_binary.mk b/core/dynamic_binary.mk index aabde47e9..a4224cd73 100644 --- a/core/dynamic_binary.mk +++ b/core/dynamic_binary.mk @@ -57,6 +57,14 @@ ifeq ($(LOCAL_MODULE_CLASS),EXECUTABLES) my_pack_module_relocations := false endif +# Likewise for recovery and utility executables +ifeq ($(LOCAL_MODULE_CLASS),RECOVERY_EXECUTABLES) + my_pack_module_relocations := false +endif +ifeq ($(LOCAL_MODULE_CLASS),UTILITY_EXECUTABLES) + my_pack_module_relocations := false +endif + # TODO (dimitry): Relocation packer is not yet available for darwin ifneq ($(HOST_OS),linux) my_pack_module_relocations := false From 3c5e3116b24e615568bb4ab4a5eca71303e9e303 Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Wed, 4 Nov 2015 11:59:23 -0800 Subject: [PATCH 275/309] build: Don't load dt.img task if custom mkbootimg is used Change-Id: I7617554a8dc6f44ea0c6a713d834da4fe558caec --- core/tasks/dt_image.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/tasks/dt_image.mk b/core/tasks/dt_image.mk index b1030467c..48a2ce79d 100644 --- a/core/tasks/dt_image.mk +++ b/core/tasks/dt_image.mk @@ -1,6 +1,7 @@ #---------------------------------------------------------------------- # Generate device tree image (dt.img) #---------------------------------------------------------------------- +ifeq ($(strip $(BOARD_CUSTOM_BOOTIMG_MK)),) ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) ifeq ($(strip $(BUILD_TINY_ANDROID)),true) include 
device/qcom/common/dtbtool/Android.mk @@ -32,3 +33,4 @@ $(INSTALLED_DTIMAGE_TARGET): $(DTBTOOL) $(INSTALLED_KERNEL_TARGET) ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DTIMAGE_TARGET) ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_DTIMAGE_TARGET) endif +endif From ecff9d1decf63b6cff210ae7d6c03ccfd3cc3176 Mon Sep 17 00:00:00 2001 From: "Christopher N. Hesse" Date: Thu, 5 Nov 2015 09:53:50 +0100 Subject: [PATCH 276/309] core: Account for prebuilt DT images Change-Id: I52b49fd3e9fc0cb196372e3249c7e3d8b888cdd1 --- core/tasks/dt_image.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/core/tasks/dt_image.mk b/core/tasks/dt_image.mk index 48a2ce79d..421877294 100644 --- a/core/tasks/dt_image.mk +++ b/core/tasks/dt_image.mk @@ -3,6 +3,7 @@ #---------------------------------------------------------------------- ifeq ($(strip $(BOARD_CUSTOM_BOOTIMG_MK)),) ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true) +ifneq ($(strip $(BOARD_KERNEL_PREBUILT_DT)),true) ifeq ($(strip $(BUILD_TINY_ANDROID)),true) include device/qcom/common/dtbtool/Android.mk endif From 4c0058af544f3363fcbc7a2e1e533610ff71225c Mon Sep 17 00:00:00 2001 From: dhacker29 Date: Fri, 6 Nov 2015 02:38:14 -0500 Subject: [PATCH 277/309] dt_image: Fix build error build/core/tasks/dt_image.mk:38: *** missing `endif'. Stop. 
Change-Id: If333d378e091ff9d333729dc8a75323966954194 --- core/tasks/dt_image.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/core/tasks/dt_image.mk b/core/tasks/dt_image.mk index 421877294..86c36fb83 100644 --- a/core/tasks/dt_image.mk +++ b/core/tasks/dt_image.mk @@ -35,3 +35,4 @@ ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DTIMAGE_TARGET) ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_DTIMAGE_TARGET) endif endif +endif From 702f5e333c9a964c3fecefc368b5512e49251842 Mon Sep 17 00:00:00 2001 From: Furquan Shaikh Date: Fri, 7 Aug 2015 11:58:05 -0700 Subject: [PATCH 278/309] vboot_signer: Add kernel subkey required for signing kernel keyblock Currently, the keyblock was being generated using data_key.vbprivk. However, we need to use kernel_subkey.vbprivk for kernel keyblock generation. This did not create any issues until now because dev-mode just throws a message saying that keyblock is invalid. But, normal-mode does not boot if keyblock is invalid. Add extra parameter for passing in kernel subkey to vboot_signer script. TEST="make bootimage-nodeps" generates correctly signed boot.img. Verified that the image boots fine in normal mode. 
Change-Id: I0fc2183b466e34ddf1d98c9532072548504fcec4 Signed-off-by: Furquan Shaikh --- core/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/Makefile b/core/Makefile index 2f71b5bcd..cf64b0dfd 100644 --- a/core/Makefile +++ b/core/Makefile @@ -975,7 +975,6 @@ define build-recoveryimage-target $(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \ > $(TARGET_RECOVERY_ROOT_OUT)/default.prop $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) - $(if $(BOARD_CUSTOM_BOOTIMG_MK),, $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ @@ -985,7 +984,7 @@ define build-recoveryimage-target $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)) $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE))) - @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} + @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} endef $(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) From 36ec474132b78d7dbbfccab96ef344ae27518c22 Mon Sep 17 00:00:00 2001 From: Furquan Shaikh Date: Mon, 10 Aug 2015 11:43:45 -0700 Subject: [PATCH 279/309] releasetools: Fix parameters for vboot_signer With the change in vboot_signer (CL:744257), an additional parameter is required to vboot_signer script. This change adds the required parameter to releasetools as well. 
BUG=23076037 Change-Id: Ice5329578b3a9bc459e278a9d404a1981b35ca88 Signed-off-by: Furquan Shaikh --- core/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/Makefile b/core/Makefile index cf64b0dfd..a80b99ab8 100644 --- a/core/Makefile +++ b/core/Makefile @@ -981,10 +981,10 @@ define build-recoveryimage-target $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)),\ $(BOOT_SIGNER) /recovery $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)) - $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ + $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)) - $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE))) - @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} + $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) + @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST}) endef $(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) From 0469d626350f2edac8c36165a150015cd91d463a Mon Sep 17 00:00:00 2001 From: "Christopher N. Hesse" Date: Mon, 2 Nov 2015 23:07:50 +0100 Subject: [PATCH 280/309] build: Fix broken Darwin builds OS X does not support the --remove-destination argument. 
Change-Id: I6856963048d8f05130e42d10d260cba49dd0f001 --- core/Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/Makefile b/core/Makefile index a80b99ab8..c3ae351f1 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1067,8 +1067,10 @@ endif $(hide) cp -r $(PRODUCT_OUT)/boot.img $(ota_temp_root)/BOOTABLE_IMAGES/ $(hide) cp -r $(PRODUCT_OUT)/recovery.img $(ota_temp_root)/BOOTABLE_IMAGES/ $(hide) ./build/tools/releasetools/make_recovery_patch $(ota_temp_root) $(ota_temp_root) - $(hide) cp --remove-destination $(ota_temp_root)/SYSTEM/bin/install-recovery.sh $(TARGET_OUT)/bin/install-recovery.sh - $(hide) cp --remove-destination $(ota_temp_root)/SYSTEM/recovery-from-boot.p $(TARGET_OUT)/recovery-from-boot.p + $(hide) rm -f $(TARGET_OUT)/bin/install-recovery.sh + $(hide) rm -f $(TARGET_OUT)/recovery-from-boot.p + $(hide) cp $(ota_temp_root)/SYSTEM/bin/install-recovery.sh $(TARGET_OUT)/bin/install-recovery.sh + $(hide) cp $(ota_temp_root)/SYSTEM/recovery-from-boot.p $(TARGET_OUT)/recovery-from-boot.p $(RECOVERY_RESOURCE_ZIP): $(INSTALLED_RECOVERYIMAGE_TARGET) $(hide) mkdir -p $(dir $@) From 45a80214358ea9cf3bee57d3020c2234b3da97a7 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Fri, 7 Nov 2014 20:45:27 +0000 Subject: [PATCH 281/309] add support for extended prop post processing Change-Id: I7cbbf161a0982009c58f58e18d0c6a8a105d6bec --- core/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/Makefile b/core/Makefile index c3ae351f1..a0234f774 100644 --- a/core/Makefile +++ b/core/Makefile @@ -267,6 +267,9 @@ endif echo "$(line)" >> $@;) $(hide) cat $(INSTALLED_ANDROID_INFO_TXT_TARGET) | grep 'require version-' | sed -e 's/require version-/ro.build.expect./g' >> $@ $(hide) build/tools/post_process_props.py $@ "$(PRODUCT_PROPERTY_UBER_OVERRIDES)" $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST) +ifdef EXTENDED_POST_PROCESS_PROPS + $(hide) $(EXTENDED_POST_PROCESS_PROPS) $@ +endif build_desc := From 
183ff865d45d0ea9be94942fad096dc7d442657e Mon Sep 17 00:00:00 2001 From: RobbieL811 Date: Tue, 12 May 2015 11:51:19 -0400 Subject: [PATCH 282/309] Add back mka bootzip Change-Id: I89163aa85c5341d0260bcb393cfc16dc9ff8a0c7 Signed-off-by: Josue Rivera --- core/Makefile | 12 +- tools/releasetools/boot_flash_from_image | 145 +++++++++++++++++++++++ 2 files changed, 156 insertions(+), 1 deletion(-) create mode 100755 tools/releasetools/boot_flash_from_image diff --git a/core/Makefile b/core/Makefile index a0234f774..c53208406 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1845,6 +1845,10 @@ else endif endif +BOOT_ZIP_FROM_IMAGE_SCRIPT := ./build/tools/releasetools/boot_flash_from_image +KERNEL_PATH := $(TARGET_KERNEL_SOURCE)/arch/arm/configs/$(TARGET_KERNEL_CONFIG) +#BOOT_ZIP_OUT_FILE := Slim-Kernel-$(DEVICE).zip + ifeq ($(TARGET_OTA_ASSERT_DEVICE),) $(INTERNAL_OTA_PACKAGE_TARGET): override_device := auto else @@ -1874,13 +1878,19 @@ $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) SLIM_TARGET_PACKAGE := $(PRODUCT_OUT)/$(SLIM_MOD_VERSION).zip -.PHONY: otapackage bacon +.PHONY: otapackage bacon bootzip otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) bacon: otapackage $(hide) ln -f $(INTERNAL_OTA_PACKAGE_TARGET) $(SLIM_TARGET_PACKAGE) $(hide) $(MD5SUM) $(SLIM_TARGET_PACKAGE) > $(SLIM_TARGET_PACKAGE).md5sum @echo -e ${CL_CYN}"Package Complete: $(SLIM_TARGET_PACKAGE)"${CL_RST} +bootzip: bootimage + $(BOOT_ZIP_FROM_IMAGE_SCRIPT) \ + $(recovery_fstab) \ + $(OUT) \ + $(TARGET_DEVICE) + # ----------------------------------------------------------------- # The factory package diff --git a/tools/releasetools/boot_flash_from_image b/tools/releasetools/boot_flash_from_image new file mode 100755 index 000000000..90dc16e37 --- /dev/null +++ b/tools/releasetools/boot_flash_from_image @@ -0,0 +1,145 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008 The Android Open Source Project +# Copyright (C) 2013 SlimRoms +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sys import argv +from os import getcwd, environ, remove, path, listdir, makedirs, walk +from re import search, sub +from shutil import rmtree, copy2, copytree +from subprocess import call +from zipfile import ZipFile, ZipInfo +from hashlib import md5 + +# declare all the variables needed +fstab = open(argv[1], 'r').read() +out = argv[2] +device = argv[3] +defconfig = "%s/obj/KERNEL_OBJ/.config" % out +source = getcwd() # PWD + +try: + bootline = search('(\S|\t| )+/boot\s.*\n*', fstab).group(0) + boot_partition = search('(/\S+){2,}', bootline).group(0) + bpt = search('\s((?!/)\S)+\s', bootline).group(0) + boot_partition_type = sub(r'(\s)+', "", bpt) + + sysline = search('(\S|\t| )+/system\s.*\n*', fstab).group(0) + system_partition = search('(/\S+){2,}', sysline).group(0) + spt = search('\s((?!/)\S)+\s', sysline).group(0) + system_partition_type = sub(r'(\s)+', "", spt) +except: + raise ValueError("malformed recovery.fstab") + +if path.exists(defconfig): + prekernel_version = search('CONFIG_LOCALVERSION=.*\n', open(defconfig, 'r').read()) + if prekernel_version: + kernel_version = sub(r'(CONFIG_LOCALVERSION=)|\"|\n|-', "", prekernel_version.group(0)) + kernel_version = "%s-%s-kernel" % (kernel_version or "Slim", device) + else: + kernel_version = "Slim-%s-kernel" % device +else: + kernel_version = "Slim-%s-kernel" % device + +updater = "%s/obj/EXECUTABLES/updater_intermediates/updater" % out +signer = "%s/framework/signapk.jar" % environ['ANDROID_HOST_OUT'] + +# rm 
-r $OUT/*kernel* +for f in listdir(out): + if "-kernel" in f: + file = "%s/%s" % (out, f) + if path.isfile(file): + remove(file) + else: + rmtree(file) + +if not path.exists(updater): + with open("dump", "w") as dump: + silencer = call('make updater'.split(), stdout = dump) + remove("dump") +if not path.exists(signer): + with open("dump", "w") as dump: + silencer = call('make signapk'.split(), stdout = dump) + remove("dump") + +zip_dir = "%s/%s" % (out, kernel_version) +if path.exists(zip_dir): + rmtree(zip_dir) +makedirs(zip_dir) + +# updater-script +updater_dir = "%s/META-INF/com/google/android" % zip_dir +if not path.exists(updater_dir): + makedirs(updater_dir) +copy2(updater, "%s/update-binary" % updater_dir) + +updater_script = "%s/updater-script" % updater_dir + +# create the contents +contents = "ui_print(\"installing Slim Kernel...\");\n" +if boot_partition_type == "mtd": + contents += "package_extract_file(\"boot.img\", \"/tmp/boot.img\");\n" + contents += "write_raw_image(\"/tmp/boot.img\", \"%s\");\n" % boot_partition +elif boot_partition_type == "emmc": + contents += "package_extract_file(\"boot.img\", \"%s\");\n" % boot_partition +elif boot_partition_type == "bml": + contents += "assert(package_extract_file(\"boot.img\", \"/tmp/boot.img\")\n" + contents += "\twrite_raw_image(\"/tmp/boot.img\", \"%s\")\n" % boot_partition + contents += "\tdelete(\"/tmp/boot.img\"));\n" +contents += "mount(\"%s\", \"EMMC\", \"%s\", \"system\");\n" % (system_partition_type, system_partition) +contents += "package_extract_dir(\"system\", \"/system\");\n" +contents += "unmount(\"/system\");\n" +contents += "ui_print(\" \");\n" +contents += "ui_print(\"Done!\");" +with open(updater_script, "w") as f: + f.write(contents) + +# copy the kernel and libs +copy2("%s/boot.img" % out, "%s/boot.img" % zip_dir) +if path.exists("%s/system/lib/modules" % out): + if not path.exists("%s/system/lib" % zip_dir): + makedirs("%s/system/lib" % zip_dir) + copytree("%s/system/lib/modules" % out, 
"%s/system/lib/modules" % zip_dir, symlinks=True) + +# strip kernel modules +kernel_modules = "%s/system/lib/modules" % zip_dir +for root, dirs, files in walk(kernel_modules): + for file in files: + fn = path.join(root, file) + if not path.islink(fn): + call(['arm-eabi-strip', '--strip-unneeded', fn]) + +# zip package +with ZipFile("%s.zip" % zip_dir, "w") as zipper: + rootlen = len(zip_dir) + 1 + for root, dirs, files in walk(zip_dir): + for file in files: + fn = path.join(root, file) + if path.islink(fn): + sym = ZipInfo(fn[rootlen:]) + sym.create_system = 3 + sym.external_attr = 2716663808L + zipper.writestr(sym, fn) + else: + zipper.write(fn, fn[rootlen:]) + +# sign it +testkey_x = "%s/build/target/product/security/testkey.x509.pem" % source +testkey_p = "%s/build/target/product/security/testkey.pk8" % source +call(['java', '-jar', signer, testkey_x, testkey_p, "%s.zip" % zip_dir, "%s-signed.zip" % zip_dir]) +remove("%s.zip" % zip_dir) +#rmtree(zip_dir) +print md5(open("%s-signed.zip" % zip_dir, "rb").read()).hexdigest() + " %s-signed.zip" % (kernel_version) +print "kernel saved to %s-signed.zip" % zip_dir From f2d8f60c8b40dd652d4318454208cfffe6b99672 Mon Sep 17 00:00:00 2001 From: Andreas Blaesius Date: Tue, 11 Aug 2015 16:50:52 +0200 Subject: [PATCH 283/309] Set metadata on bootzip Change-Id: I54cf171f0b2e46ece98fa89ce2a3972bf7a5127d --- tools/releasetools/boot_flash_from_image | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/releasetools/boot_flash_from_image b/tools/releasetools/boot_flash_from_image index 90dc16e37..fb059427c 100755 --- a/tools/releasetools/boot_flash_from_image +++ b/tools/releasetools/boot_flash_from_image @@ -100,6 +100,8 @@ elif boot_partition_type == "bml": contents += "\tdelete(\"/tmp/boot.img\"));\n" contents += "mount(\"%s\", \"EMMC\", \"%s\", \"system\");\n" % (system_partition_type, system_partition) contents += "package_extract_dir(\"system\", \"/system\");\n" +contents += "ui_print(\"Setting permissions...\");\n" 
+contents += "set_metadata_recursive(\"/system/lib/modules\", \"uid\", 0, \"gid\", 0, \"dmode\", 0755, \"fmode\",0644, \"capabilities\", 0x0, \"selabel\", \"u:object_r:system_file:s0\");\n" contents += "unmount(\"/system\");\n" contents += "ui_print(\" \");\n" contents += "ui_print(\"Done!\");" From 1d70a2cf9dd4d114e3a1a6a2497258628d8fd409 Mon Sep 17 00:00:00 2001 From: Anthony King Date: Sun, 22 Nov 2015 11:04:23 +0100 Subject: [PATCH 284/309] repopick: fix topic Change-Id: I7840130166a2336fe1988257a7ffca7b273a5947 --- tools/repopick.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/repopick.py b/tools/repopick.py index 15566bb91..187d79707 100755 --- a/tools/repopick.py +++ b/tools/repopick.py @@ -194,7 +194,7 @@ def is_pathA_subdir_of_pathB(pathA, pathB): # Get all commits for a specified query def fetch_query(query): - url = 'http://review.slimroms.eu/changes/?q=%s' % query + url = 'https://review.slimroms.org/changes/?q=%s' % query if args.verbose: print('Fetching all commits using query: %s\n' % query) f = urllib.request.urlopen(url) @@ -208,7 +208,6 @@ def fetch_query(query): if matchObj: sys.stderr.write('ERROR: Query %s was not found on the server\n' % query) sys.exit(1) - d = re.sub(r'\[(.*)\]', r'\1', d) if args.verbose: print('Result from request:\n' + d) From 5fd75f4f9fb53ebc06529d043d3a8824a52ab1ab Mon Sep 17 00:00:00 2001 From: Griffin Millender Date: Tue, 29 Dec 2015 00:25:59 -0600 Subject: [PATCH 285/309] AOSPB initial --- core/Makefile | 14 +++--- core/config.mk | 4 +- core/dumpvar.mk | 2 +- core/product_config.mk | 10 ++-- envsetup.sh | 46 +++++++++---------- tools/buildinfo.sh | 2 +- .../{slim.mk.template => aospb.mk.template} | 9 ++-- tools/device/mkvendor.sh | 4 +- tools/releasetools/edify_generator.py | 2 +- tools/releasetools/ota_from_target_files.py | 16 +------ tools/roomservice.py | 8 ++-- 11 files changed, 50 insertions(+), 67 deletions(-) rename tools/device/{slim.mk.template => aospb.mk.template} (60%) diff 
--git a/core/Makefile b/core/Makefile index c53208406..215a3edf9 100644 --- a/core/Makefile +++ b/core/Makefile @@ -218,7 +218,7 @@ endif $(hide) TARGET_BUILD_TYPE="$(TARGET_BUILD_VARIANT)" \ TARGET_BUILD_FLAVOR="$(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)" \ TARGET_DEVICE="$(TARGET_VENDOR_DEVICE_NAME)" \ - SLIM_DEVICE="$(TARGET_DEVICE)" \ + AOSPB_DEVICE="$(TARGET_DEVICE)" \ PRODUCT_NAME="$(TARGET_VENDOR_PRODUCT_NAME)" \ PRODUCT_BRAND="$(PRODUCT_BRAND)" \ PRODUCT_DEFAULT_LOCALE="$(call get-default-product-locale,$(PRODUCT_LOCALES))" \ @@ -1838,7 +1838,7 @@ endif ifeq ($(WITH_GMS),true) $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := false else -ifneq ($(SLIM_BUILD),) +ifneq ($(AOSPB_BUILD),) $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := true else $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := false @@ -1847,7 +1847,7 @@ endif BOOT_ZIP_FROM_IMAGE_SCRIPT := ./build/tools/releasetools/boot_flash_from_image KERNEL_PATH := $(TARGET_KERNEL_SOURCE)/arch/arm/configs/$(TARGET_KERNEL_CONFIG) -#BOOT_ZIP_OUT_FILE := Slim-Kernel-$(DEVICE).zip +#BOOT_ZIP_OUT_FILE := AOSPB-Kernel-$(DEVICE).zip ifeq ($(TARGET_OTA_ASSERT_DEVICE),) $(INTERNAL_OTA_PACKAGE_TARGET): override_device := auto @@ -1876,14 +1876,14 @@ $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS) $(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \ $(BUILT_TARGET_FILES_PACKAGE) $@ -SLIM_TARGET_PACKAGE := $(PRODUCT_OUT)/$(SLIM_MOD_VERSION).zip +AOSPB_TARGET_PACKAGE := $(PRODUCT_OUT)/$(AOSPB_MOD_VERSION).zip .PHONY: otapackage bacon bootzip otapackage: $(INTERNAL_OTA_PACKAGE_TARGET) bacon: otapackage - $(hide) ln -f $(INTERNAL_OTA_PACKAGE_TARGET) $(SLIM_TARGET_PACKAGE) - $(hide) $(MD5SUM) $(SLIM_TARGET_PACKAGE) > $(SLIM_TARGET_PACKAGE).md5sum - @echo -e ${CL_CYN}"Package Complete: $(SLIM_TARGET_PACKAGE)"${CL_RST} + $(hide) ln -f $(INTERNAL_OTA_PACKAGE_TARGET) $(AOSPB_TARGET_PACKAGE) + $(hide) $(MD5SUM) $(AOSPB_TARGET_PACKAGE) > $(AOSPB_TARGET_PACKAGE).md5sum + @echo -e ${CL_CYN}"Package Complete: 
$(AOSPB_TARGET_PACKAGE)"${CL_RST} bootzip: bootimage $(BOOT_ZIP_FROM_IMAGE_SCRIPT) \ diff --git a/core/config.mk b/core/config.mk index a44652468..81ec3b1ed 100644 --- a/core/config.mk +++ b/core/config.mk @@ -723,10 +723,10 @@ ifneq ($(TARGET_COPY_FILES_OVERRIDES),) PRODUCT_COPY_FILES := $(filter-out $(TARGET_COPY_FILES_OVERRIDES), $(PRODUCT_COPY_FILES)) endif -ifneq ($(SLIM_BUILD),) +ifneq ($(AOSPB_BUILD),) ## We need to be sure the global selinux policies are included ## last, to avoid accidental resetting by device configs -$(eval include vendor/slim/sepolicy/sepolicy.mk) +$(eval include vendor/aospb/sepolicy/sepolicy.mk) # Include any vendor specific config.mk file -include $(TOPDIR)vendor/*/build/core/config.mk diff --git a/core/dumpvar.mk b/core/dumpvar.mk index 7aa4afe24..fb079450b 100644 --- a/core/dumpvar.mk +++ b/core/dumpvar.mk @@ -67,7 +67,7 @@ HOST_OS_EXTRA:=$(shell python -c "import platform; print(platform.platform())") $(info ============================================) $(info PLATFORM_VERSION_CODENAME=$(PLATFORM_VERSION_CODENAME)) $(info PLATFORM_VERSION=$(PLATFORM_VERSION)) -$(info SLIM_VERSION=$(SLIM_VERSION)) +$(info AOSPB_VERSION=$(AOSPB_VERSION)) $(info TARGET_PRODUCT=$(TARGET_PRODUCT)) $(info TARGET_BUILD_VARIANT=$(TARGET_BUILD_VARIANT)) $(info TARGET_BUILD_TYPE=$(TARGET_BUILD_TYPE)) diff --git a/core/product_config.mk b/core/product_config.mk index 1967e5222..83d9fcc73 100644 --- a/core/product_config.mk +++ b/core/product_config.mk @@ -179,9 +179,9 @@ include $(BUILD_SYSTEM)/node_fns.mk include $(BUILD_SYSTEM)/product.mk include $(BUILD_SYSTEM)/device.mk -# A SLIM build needs only the SLIM product makefiles. -ifneq ($(SLIM_BUILD),) - all_product_configs := $(shell find device -path "*/$(SLIM_BUILD)/slim.mk") +# A AOSPB build needs only the AOSPB product makefiles. 
+ifneq ($(AOSPB_BUILD),) + all_product_configs := $(shell find device -path "*/$(AOSPB_BUILD)/aospb.mk") else ifneq ($(strip $(TARGET_BUILD_APPS)),) # An unbundled app build needs only the core product makefiles. @@ -192,9 +192,9 @@ else # files in the tree. all_product_configs := $(get-all-product-makefiles) endif # TARGET_BUILD_APPS -endif # SLIM_BUILD +endif # AOSPB_BUILD -ifeq ($(SLIM_BUILD),) +ifeq ($(AOSPB_BUILD),) # Find the product config makefile for the current product. # all_product_configs consists items like: # : diff --git a/envsetup.sh b/envsetup.sh index cceb7045d..5824d1921 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -78,13 +78,13 @@ function check_product() return fi - if (echo -n $1 | grep -q -e "^slim_") ; then - SLIM_BUILD=$(echo -n $1 | sed -e 's/^slim_//g') - export BUILD_NUMBER=$((date +%s%N ; echo $SLIM_BUILD; hostname) | openssl sha1 | sed -e 's/.*=//g; s/ //g' | cut -c1-10) + if (echo -n $1 | grep -q -e "^aospb_") ; then + AOSPB_BUILD=$(echo -n $1 | sed -e 's/^aospb_//g') + export BUILD_NUMBER=$((date +%s%N ; echo $AOSPB_BUILD; hostname) | openssl sha1 | sed -e 's/.*=//g; s/ //g' | cut -c1-10) else - SLIM_BUILD= + AOSPB_BUILD= fi - export SLIM_BUILD + export AOSPB_BUILD TARGET_PRODUCT=$1 \ TARGET_BUILD_VARIANT= \ @@ -506,7 +506,7 @@ function print_lunch_menu() echo " (ohai, koush!)" fi echo - if [ "z${SLIM_DEVICES_ONLY}" != "z" ]; then + if [ "z${AOSPB_DEVICES_ONLY}" != "z" ]; then echo "Breakfast menu... pick a combo:" else echo "Lunch menu... pick a combo:" @@ -520,7 +520,7 @@ function print_lunch_menu() i=$(($i+1)) done | column - if [ "z${SLIM_DEVICES_ONLY}" != "z" ]; then + if [ "z${AOSPB_DEVICES_ONLY}" != "z" ]; then echo "... and don't forget the bacon!" 
fi @@ -549,10 +549,10 @@ function breakfast() { target=$1 local variant=$2 - SLIM_DEVICES_ONLY="true" + AOSPB_DEVICES_ONLY="true" unset LUNCH_MENU_CHOICES add_lunch_combo full-eng - for f in `/bin/ls vendor/slim/vendorsetup.sh 2> /dev/null` + for f in `/bin/ls vendor/aospb/vendorsetup.sh 2> /dev/null` do echo "including $f" . $f @@ -568,11 +568,11 @@ function breakfast() # A buildtype was specified, assume a full device name lunch $target else - # This is probably just the SLIM model name + # This is probably just the AOSPB model name if [ -z "$variant" ]; then variant="userdebug" fi - lunch slim_$target-$variant + lunch aospb_$target-$variant fi fi return $? @@ -622,7 +622,7 @@ function lunch() check_product $product if [ $? -ne 0 ] then - # if we can't find a product, try to grab it off the SLIM github + # if we can't find a product, try to grab it off the AOSPB github T=$(gettop) pushd $T > /dev/null build/tools/roomservice.py $product @@ -734,7 +734,7 @@ function tapas() function eat() { if [ "$OUT" ] ; then - MODVERSION=$(get_build_var SLIM_VERSION) + MODVERSION=$(get_build_var AOSPB_VERSION) ZIPFILE=$MODVERSION.zip ZIPPATH=$OUT/$ZIPFILE if [ ! -f $ZIPPATH ] ; then @@ -750,7 +750,7 @@ function eat() done echo "Device Found.." fi - if (adb shell getprop ro.slim.device | grep -q "$SLIM_BUILD"); + if (adb shell getprop ro.aospb.device | grep -q "$AOSPB_BUILD"); then # if adbd isn't root we can't write to /cache/recovery/ adb root @@ -772,7 +772,7 @@ EOF fi return $? else - echo "The connected device does not appear to be $SLIM_BUILD, run away!" + echo "The connected device does not appear to be $AOSPB_BUILD, run away!" fi } @@ -1751,7 +1751,7 @@ function repopick() { function fixup_common_out_dir() { common_out_dir=$(get_build_var OUT_DIR)/target/common target_device=$(get_build_var TARGET_DEVICE) - if [ ! -z $SLIM_FIXUP_COMMON_OUT ]; then + if [ ! -z $AOSPB_FIXUP_COMMON_OUT ]; then if [ -d ${common_out_dir} ] && [ ! 
-L ${common_out_dir} ]; then mv ${common_out_dir} ${common_out_dir}-${target_device} ln -s ${common_out_dir}-${target_device} ${common_out_dir} @@ -1796,7 +1796,7 @@ function installboot() sleep 1 adb wait-for-online shell mount /system 2>&1 > /dev/null adb wait-for-online remount - if (adb shell getprop ro.slim.device | grep -q "$SLIM_BUILD"); + if (adb shell getprop ro.aospb.device | grep -q "$AOSPB_BUILD"); then adb push $OUT/boot.img /cache/ for i in $OUT/system/lib/modules/*; @@ -1807,7 +1807,7 @@ function installboot() adb shell chmod 644 /system/lib/modules/* echo "Installation complete." else - echo "The connected device does not appear to be $SLIM_BUILD, run away!" + echo "The connected device does not appear to be $AOSPB_BUILD, run away!" fi } @@ -1841,13 +1841,13 @@ function installrecovery() sleep 1 adb wait-for-online shell mount /system 2>&1 > /dev/null adb wait-for-online remount - if (adb shell getprop ro.slim.device | grep -q "$SLIM_BUILD"); + if (adb shell getprop ro.aospb.device | grep -q "$AOSPB_BUILD"); then adb push $OUT/recovery.img /cache/ adb shell dd if=/cache/recovery.img of=$PARTITION echo "Installation complete." else - echo "The connected device does not appear to be $SLIM_BUILD, run away!" + echo "The connected device does not appear to be $AOSPB_BUILD, run away!" fi } @@ -1867,7 +1867,7 @@ function dopush() echo "Device Found." fi - if (adb shell getprop ro.slim.device | grep -q "$SLIM_BUILD") || [ "$FORCE_PUSH" == "true" ]; + if (adb shell getprop ro.aospb.device | grep -q "$AOSPB_BUILD") || [ "$FORCE_PUSH" == "true" ]; then # retrieve IP and PORT info if we're using a TCP connection TCPIPPORT=$(adb devices | egrep '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:[0-9]+[^0-9]+' \ @@ -1971,7 +1971,7 @@ EOF rm -f $OUT/.log return 0 else - echo "The connected device does not appear to be $SLIM_BUILD, run away!" + echo "The connected device does not appear to be $AOSPB_BUILD, run away!" 
fi } @@ -2095,7 +2095,7 @@ unset f # Add completions check_bash_version && { - dirs="sdk/bash_completion vendor/slim/bash_completion" + dirs="sdk/bash_completion vendor/aospb/bash_completion" for dir in $dirs; do if [ -d ${dir} ]; then for f in `/bin/ls ${dir}/[a-z]*.bash 2> /dev/null`; do diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh index 185690d9d..238fdc034 100755 --- a/tools/buildinfo.sh +++ b/tools/buildinfo.sh @@ -58,6 +58,6 @@ if [ "$TARGET_UNIFIED_DEVICE" == "" ] ; then fi echo "ro.build.characteristics=$TARGET_AAPT_CHARACTERISTICS" -echo "ro.slim.device=$SLIM_DEVICE" +echo "ro.aospb.device=$AOSPB_DEVICE" echo "# end build properties" diff --git a/tools/device/slim.mk.template b/tools/device/aospb.mk.template similarity index 60% rename from tools/device/slim.mk.template rename to tools/device/aospb.mk.template index 6af93d7e9..7ddd27bba 100644 --- a/tools/device/slim.mk.template +++ b/tools/device/aospb.mk.template @@ -1,18 +1,15 @@ -## Specify phone tech before including full_phone -$(call inherit-product, vendor/slim/config/gsm.mk) - # Release name PRODUCT_RELEASE_NAME := __DEVICE__ -# Inherit some common SLIM stuff. -$(call inherit-product, vendor/slim/config/common_full_phone.mk) +# Inherit some common AOSPB stuff. +$(call inherit-product, vendor/aospb/config/common_full_phone.mk) # Inherit device configuration $(call inherit-product, device/__MANUFACTURER__/__DEVICE__/device___DEVICE__.mk) ## Device identifier. This must come after all inclusions PRODUCT_DEVICE := __DEVICE__ -PRODUCT_NAME := slim___DEVICE__ +PRODUCT_NAME := aospb___DEVICE__ PRODUCT_BRAND := __MANUFACTURER__ PRODUCT_MODEL := __DEVICE__ PRODUCT_MANUFACTURER := __MANUFACTURER__ diff --git a/tools/device/mkvendor.sh b/tools/device/mkvendor.sh index 2742a8b79..ca1eddb25 100755 --- a/tools/device/mkvendor.sh +++ b/tools/device/mkvendor.sh @@ -110,6 +110,6 @@ popd echo Done! 
echo Use the following command to set up your build environment: -echo ' 'lunch slim_$DEVICE-eng +echo ' 'lunch aospb_$DEVICE-eng echo And use the follwowing command to build a recovery: -echo ' '. build/tools/device/makerecoveries.sh slim_$DEVICE-eng +echo ' '. build/tools/device/makerecoveries.sh aospb_$DEVICE-eng diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py index 0c34fc0e9..a63ed9208 100644 --- a/tools/releasetools/edify_generator.py +++ b/tools/releasetools/edify_generator.py @@ -151,7 +151,7 @@ def RunBackup(self, command): self.script.append(('run_program("/tmp/install/bin/backuptool.sh", "%s");' % command)) def ValidateSignatures(self, command): - self.script.append('package_extract_file("META-INF/org/slimroms/releasekey", "/tmp/releasekey");') + self.script.append('package_extract_file("META-INF/org/aospb/releasekey", "/tmp/releasekey");') # Exit code 124 == abort. run_program returns raw, so left-shift 8bit self.script.append('run_program("/tmp/install/bin/otasigcheck.sh") != "31744" || abort("Can\'t install this package on top of incompatible data. 
Please try another package or run a factory reset");') diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py index 29ceaff95..2fc03df0f 100755 --- a/tools/releasetools/ota_from_target_files.py +++ b/tools/releasetools/ota_from_target_files.py @@ -626,20 +626,6 @@ def WriteFullOTAPackage(input_zip, output_zip): system_progress = 0.75 - script.Print(" _____________________ ") - script.Print(" / www.slimroms.org |") - script.Print(" / |") - script.Print(" / ___________________|") - script.Print(" / / ___ ") - script.Print(" / / ___/ \ ") - script.Print(" / / / \___/____ ____ ") - script.Print(" / / | |___/ \_/ |") - script.Print(" ___________/ / | | | |") - script.Print("| / | | | | | |") - script.Print("| / | | | | | |") - script.Print("|_____________/ \___^___^___^___^___/") - script.Print(" ") - if OPTIONS.wipe_user_data: system_progress -= 0.1 if HasVendorPartition(input_zip): @@ -760,7 +746,7 @@ def output_sink(fn, data): common.ZipWriteStr(output_zip, "system/build.prop", ""+input_zip.read("SYSTEM/build.prop")) - common.ZipWriteStr(output_zip, "META-INF/org/slimroms/releasekey", + common.ZipWriteStr(output_zip, "META-INF/org/aospb/releasekey", ""+input_zip.read("META/releasekey.txt")) def WritePolicyConfig(file_name, output_zip): diff --git a/tools/roomservice.py b/tools/roomservice.py index 5f5c237cb..b23da0eab 100755 --- a/tools/roomservice.py +++ b/tools/roomservice.py @@ -42,11 +42,11 @@ DEBUG = False default_manifest = ".repo/manifest.xml" -custom_local_manifest = ".repo/local_manifests/slim_manifest.xml" +custom_local_manifest = ".repo/local_manifests/aospb_manifest.xml" custom_default_revision = "mm6.0" -custom_dependencies = "slim.dependencies" -org_manifest = "SlimRoms" # leave empty if org is provided in manifest -org_display = "SlimRoms" # needed for displaying +custom_dependencies = "aospb.dependencies" +org_manifest = "AOSPB" # leave empty if org is provided in manifest +org_display = "AOSPB" # needed 
for displaying github_auth = None From 4fd5ff19d20a5ebdd09cd2e9bb847fe52046deca Mon Sep 17 00:00:00 2001 From: Tao Bao Date: Fri, 7 Aug 2015 19:49:45 -0700 Subject: [PATCH 286/309] Change the cache partition size check into warnings. For some old builds, we may not define cache partition size. Change the exception into a warning to make the script backward compatible. Change-Id: Ie94c7fbb1a9f3a7db3f16e8d845e493a534aac5b --- tools/releasetools/blockimgdiff.py | 16 +++++++++++----- tools/releasetools/common.py | 4 ++++ tools/releasetools/ota_from_target_files.py | 5 +++++ 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py index cb6fc8582..a6c6bd874 100644 --- a/tools/releasetools/blockimgdiff.py +++ b/tools/releasetools/blockimgdiff.py @@ -483,7 +483,7 @@ def WriteTransfers(self, prefix): if free_string: out.append("".join(free_string)) - if self.version >= 2: + if self.version >= 2 and common.OPTIONS.cache_size is not None: # Sanity check: abort if we're going to need more stash space than # the allowed size (cache_size * threshold). There are two purposes # of having a threshold here. 
a) Part of the cache may have been @@ -524,10 +524,16 @@ def WriteTransfers(self, prefix): if self.version >= 2: max_stashed_size = max_stashed_blocks * self.tgt.blocksize - max_allowed = common.OPTIONS.cache_size * common.OPTIONS.stash_threshold - print("max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n" % ( - max_stashed_blocks, max_stashed_size, max_allowed, - max_stashed_size * 100.0 / max_allowed)) + OPTIONS = common.OPTIONS + if OPTIONS.cache_size is not None: + max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold + print("max stashed blocks: %d (%d bytes), " + "limit: %d bytes (%.2f%%)\n" % ( + max_stashed_blocks, max_stashed_size, max_allowed, + max_stashed_size * 100.0 / max_allowed)) + else: + print("max stashed blocks: %d (%d bytes), limit: \n" % ( + max_stashed_blocks, max_stashed_size)) def ReviseStashSize(self): print("Revising stash size...") diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 9f2835a89..47cf75941 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -70,6 +70,10 @@ def __init__(self): # Values for "certificate" in apkcerts that mean special things. SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL") +# Stash size cannot exceed cache_size * threshold. 
+OPTIONS.cache_size = None +OPTIONS.stash_threshold = 0.8 + class ExternalError(RuntimeError): pass diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py index 2fc03df0f..8b83c818d 100755 --- a/tools/releasetools/ota_from_target_files.py +++ b/tools/releasetools/ota_from_target_files.py @@ -1700,6 +1700,11 @@ def option_handler(o, a): output_zip = zipfile.ZipFile(temp_zip_file, "w", compression=zipfile.ZIP_DEFLATED) + cache_size = OPTIONS.info_dict.get("cache_size", None) + if cache_size is None: + print "--- can't determine the cache partition size ---" + OPTIONS.cache_size = cache_size + if OPTIONS.incremental_source is None: WriteFullOTAPackage(input_zip, output_zip) if OPTIONS.package_key is None: From 6e6ff2106acd86f4829c3ea87025fc0e026b6957 Mon Sep 17 00:00:00 2001 From: Tony Malagisi Date: Wed, 25 Feb 2015 00:00:01 -0500 Subject: [PATCH 287/309] Add magic Removes target/out Change-Id: Iced2f249c2e08c1b754bf9a707367bc62d2ea1c2 --- core/main.mk | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core/main.mk b/core/main.mk index 8643ee3d4..fc8f500d7 100644 --- a/core/main.mk +++ b/core/main.mk @@ -85,6 +85,9 @@ dont_bother_goals := clean clobber dataclean installclean \ ifneq ($(filter $(dont_bother_goals), $(MAKECMDGOALS)),) dont_bother := true endif +ifeq ($(MAKECMDGOALS),magic) +dont_bother := true +endif # Targets that provide quick help on the build system. include $(BUILD_SYSTEM)/help.mk @@ -1045,6 +1048,11 @@ clobber: clean # The rules for dataclean and installclean are defined in cleanbuild.mk. 
+.PHONY: magic +magic: + @rm -rf $(OUT_DIR)/target/product/* + @echo -e ${CL_GRN}"Target/Product directory removed."${CL_RST} + #xxx scrape this from ALL_MODULE_NAME_TAGS .PHONY: modules modules: From de3534b5f0a21eefd870ff703c52148fc9e14e99 Mon Sep 17 00:00:00 2001 From: Tony Malagisi Date: Wed, 25 Feb 2015 00:08:29 -0500 Subject: [PATCH 288/309] make it dirty Change-Id: I6f9ce0ee7aa3501daa73c4c5888c20725148a0a5 --- core/main.mk | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/core/main.mk b/core/main.mk index fc8f500d7..e7429b80c 100644 --- a/core/main.mk +++ b/core/main.mk @@ -88,6 +88,9 @@ endif ifeq ($(MAKECMDGOALS),magic) dont_bother := true endif +ifeq ($(MAKECMDGOALS),dirty) +dont_bother := true +endif # Targets that provide quick help on the build system. include $(BUILD_SYSTEM)/help.mk @@ -1053,6 +1056,14 @@ magic: @rm -rf $(OUT_DIR)/target/product/* @echo -e ${CL_GRN}"Target/Product directory removed."${CL_RST} +# Clears out zip and build.prop +.PHONY: dirty +dirty: + @rm -rf $(OUT_DIR)/target/product/*/system/build.prop + @rm -rf $(OUT_DIR)/target/product/*/*.zip + @rm -rf $(OUT_DIR)/target/product/*/*.md5sum + @echo -e ${CL_GRN}"build.prop and zip files erased"${CL_RST} + #xxx scrape this from ALL_MODULE_NAME_TAGS .PHONY: modules modules: From 2d992bb79679d33e58137a886800b389952dbe0b Mon Sep 17 00:00:00 2001 From: Tony Malagisi Date: Wed, 25 Feb 2015 05:55:31 -0500 Subject: [PATCH 289/309] Moar clean options! 
appclean: clears out all apks imgclean: clears out all img files kernelclean: clears out all kernel stuff systemclean: clears out all system stuff recoveryclean: clears out all recovery stuff rootclean: clears out all root stuff Conflicts: core/main.mk Change-Id: Ice7c589378892e09c2820ec14c9f383529a8fbc3 --- core/main.mk | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/core/main.mk b/core/main.mk index e7429b80c..fe1a28b88 100644 --- a/core/main.mk +++ b/core/main.mk @@ -91,6 +91,24 @@ endif ifeq ($(MAKECMDGOALS),dirty) dont_bother := true endif +ifeq ($(MAKECMDGOALS),appclean) +dont_bother := true +endif +ifeq ($(MAKECMDGOALS),imgclean) +dont_bother := true +endif +ifeq ($(MAKECMDGOALS),kernelclean) +dont_bother := true +endif +ifeq ($(MAKECMDGOALS),systemclean) +dont_bother := true +endif +ifeq ($(MAKECMDGOALS),recoveryclean) +dont_bother := true +endif +ifeq ($(MAKECMDGOALS),rootclean) +dont_bother := true +endif # Targets that provide quick help on the build system. 
include $(BUILD_SYSTEM)/help.mk @@ -1049,6 +1067,46 @@ clean: .PHONY: clobber clobber: clean +# Clears out all apks +.PHONY: appclean +appclean: + @rm -rf $(OUT_DIR)/target/product/*/system/app + @rm -rf $(OUT_DIR)/target/product/*/system/priv-app + @echo -e ${CL_GRN}"All apks erased"${CL_RST} + +# Clears out all .img files +.PHONY: imgclean +imgclean: + @rm -rf $(OUT_DIR)/target/product/*/*.img + @echo -e ${CL_GRN}"All .img files erased"${CL_RST} + +# Clears out all kernel stuff +.PHONY: kernelclean +kernelclean: + @rm -rf $(OUT_DIR)/target/product/*/kernel + @rm -rf $(OUT_DIR)/target/product/*/boot.img + @echo -e ${CL_GRN}"All kernel compnents erased"${CL_RST} + +# Clears out all system stuff +.PHONY: systemclean +systemclean: + @rm -rf $(OUT_DIR)/target/product/*/system/ + @rm -rf $(OUT_DIR)/target/product/*/system.img + @echo -e ${CL_GRN}"System components erased"${CL_RST} + +# Clears out all recovery stuff +.PHONY: recoveryclean +recoveryclean: + @rm -rf $(OUT_DIR)/target/product/*/recovery/ + @rm -rf $(OUT_DIR)/target/product/*/recovery.img + @echo -e ${CL_GRN}"All recovery components erased"${CL_RST} + +# Clears out all root stuff +.PHONY: rootclean +rootclean: + @rm -rf $(OUT_DIR)/target/product/*/root/ + @echo -e ${CL_GRN}"All root components erased"${CL_RST} + # The rules for dataclean and installclean are defined in cleanbuild.mk. .PHONY: magic From 189f3e125dfaf804f78933bfa23627b326ba3429 Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Sat, 9 Jan 2016 19:43:36 -0800 Subject: [PATCH 290/309] qcom: Fix non-QC target pathmappings * The actual key for camera, gps, loc-api is qcom-$(name), not $(NAME). 
Change-Id: Id0f2ed760cae3b5627983ef5a2206775bc880637 --- core/qcom_target.mk | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 1b69715bd..2578ef100 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -80,10 +80,10 @@ $(call project-set-path,qcom-audio,hardware/qcom/audio/default) $(call project-set-path,qcom-display,hardware/qcom/display/$(TARGET_BOARD_PLATFORM)) $(call project-set-path,qcom-media,hardware/qcom/media/default) -$(call project-set-path,CAMERA,hardware/qcom/camera) -$(call project-set-path,GPS,hardware/qcom/gps) -$(call project-set-path,SENSORS,hardware/qcom/sensors) -$(call project-set-path,LOC_API,vendor/qcom/opensource/location) +$(call project-set-path,qcom-camera,hardware/qcom/camera) +$(call project-set-path,qcom-gps,hardware/qcom/gps) +$(call project-set-path,qcom-sensors,hardware/qcom/sensors) +$(call project-set-path,qcom-loc-api,vendor/qcom/opensource/location) $(call ril-set-path-variant,ril) $(call wlan-set-path-variant,wlan) From d0da961d29cbd7a0c20042375cb139f6db9418da Mon Sep 17 00:00:00 2001 From: Ethan Chen Date: Sat, 9 Jan 2016 19:26:00 -0800 Subject: [PATCH 291/309] qcom: Add dataservices to project pathmap * Set vendor/qcom/opensource/dataservices as the default dataservices provider. * Devices can override this choice by setting USE_DEVICE_SPECIFIC_DATASERVICES and providing dataservices in the $(TARGET_DEVICE_DIR)/dataservices directory. * Set $(TARGET_DEVICE_DIR)/dataservices as the default dataservices provider for non-QC hardware (Nexus devices). 
Change-Id: Ie003110d7b745367947b4b728bc83800c0351068 --- core/qcom_target.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/qcom_target.mk b/core/qcom_target.mk index 2578ef100..aa366151e 100644 --- a/core/qcom_target.mk +++ b/core/qcom_target.mk @@ -69,6 +69,7 @@ $(call set-device-specific-path,CAMERA,camera,hardware/qcom/camera) $(call set-device-specific-path,GPS,gps,hardware/qcom/gps) $(call set-device-specific-path,SENSORS,sensors,hardware/qcom/sensors) $(call set-device-specific-path,LOC_API,loc-api,vendor/qcom/opensource/location) +$(call set-device-specific-path,DATASERVICES,dataservices,vendor/qcom/opensource/dataservices) $(call ril-set-path-variant,ril) $(call wlan-set-path-variant,wlan-caf) @@ -84,6 +85,7 @@ $(call project-set-path,qcom-camera,hardware/qcom/camera) $(call project-set-path,qcom-gps,hardware/qcom/gps) $(call project-set-path,qcom-sensors,hardware/qcom/sensors) $(call project-set-path,qcom-loc-api,vendor/qcom/opensource/location) +$(call project-set-path,qcom-dataservices,$(TARGET_DEVICE_DIR)/dataservices) $(call ril-set-path-variant,ril) $(call wlan-set-path-variant,wlan) From 16bdb2ca057502621e35a3c97e20b0acdb0be427 Mon Sep 17 00:00:00 2001 From: Khalid Zubair Date: Tue, 17 Nov 2015 09:59:49 -0800 Subject: [PATCH 292/309] kernel: add `make kernelxconfig' target kernelxconfig is identical to kernelconfig except that it launches xconfig instead of menuconfig. xconfig easier to use if you need to search the Kconfig tree. 
Change-Id: I6d91c7e3efbc9f172b040425f2ec545e3f03b44d (cherry picked from commit 60ca2b7897b552ce5dee2633fc84906222579763) --- core/tasks/kernel.mk | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index d43e8e10f..d1d24d6f2 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -264,13 +264,15 @@ $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) tags -kernelconfig: $(KERNEL_OUT) +kernelconfig: KERNELCONFIG_MODE := menuconfig +kernelxconfig: KERNELCONFIG_MODE := xconfig +kernelxconfig kernelconfig: $(KERNEL_OUT) $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG) $(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; fi env KCONFIG_NOTIMESTAMP=true \ - $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) menuconfig + $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(KERNELCONFIG_MODE) env KCONFIG_NOTIMESTAMP=true \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) savedefconfig cp $(KERNEL_OUT)/defconfig $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG) From 7dd98b7182eb3c6173bd23435fba98d02345d352 Mon Sep 17 00:00:00 2001 From: Khalid Zubair Date: Wed, 18 Nov 2015 15:54:40 -0800 Subject: [PATCH 293/309] kernel: don't build modules or dtbs unless enabled Build modules and dtbs only if enabled. The make commands were allowed to fail silently to support builds that did not have these options enabled. 
This had a side effect of allowing builds to complete even if modules failed to build. Don't suppress these error anymore. Change-Id: I842124b465d9e14edd6a09ffe54f09da32f10632 --- core/tasks/kernel.mk | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index d1d24d6f2..140964c88 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -230,12 +230,26 @@ $(KERNEL_CONFIG): $(KERNEL_OUT) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) KCONFIG_ALLCONFIG=$(KERNEL_OUT)/.config alldefconfig; fi TARGET_KERNEL_BINARIES: $(KERNEL_OUT) $(KERNEL_CONFIG) $(KERNEL_HEADERS_INSTALL) + @echo -e ${CL_GRN}"Building Kernel"${CL_RST} $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(TARGET_PREBUILT_INT_KERNEL_TYPE) - -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) dtbs - -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) modules - -$(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) INSTALL_MOD_PATH=../../$(KERNEL_MODULES_INSTALL) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) modules_install - $(mv-modules) - $(clean-module-folder) + $(hide) if grep -q 'CONFIG_OF=y' $(KERNEL_CONFIG) ; \ + then \ + echo -e ${CL_GRN}"Building DTBs"${CL_RST} ; \ + $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) dtbs ; \ + else \ + echo "DTBs not enabled" ; \ + fi ; + $(hide) if grep -q 'CONFIG_MODULES=y' $(KERNEL_CONFIG) ; \ + then \ + echo -e ${CL_GRN}"Building Kernel Modules"${CL_RST} ; \ + $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) modules && \ + $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) INSTALL_MOD_PATH=../../$(KERNEL_MODULES_INSTALL) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) modules_install && \ + 
$(mv-modules) && \ + $(clean-module-folder) ; \ + else \ + echo "Kernel Modules not enabled" ; \ + fi ; + $(TARGET_KERNEL_MODULES): TARGET_KERNEL_BINARIES From 1f1ace35d51655154a14ddeebe62ed734ad3b347 Mon Sep 17 00:00:00 2001 From: Khalid Zubair Date: Wed, 18 Nov 2015 17:13:09 -0800 Subject: [PATCH 294/309] kernel: prevent targets from being rebuilt needlessly Fix some rules that listed directories as their dependencies. Directories are always out-of-date because the rules that depend on them touch file under the directories they depend on them. Replace the directory dependencies with stamp file rules that handle directory creation. Change-Id: I2b36c846b6565b7f9aba5bb7583576ad300983b8 --- core/tasks/kernel.mk | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index 140964c88..cc1b5ce1e 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -28,6 +28,7 @@ SELINUX_DEFCONFIG := $(TARGET_KERNEL_SELINUX_CONFIG) ## Internal variables KERNEL_OUT := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ KERNEL_CONFIG := $(KERNEL_OUT)/.config +KERNEL_OUT_STAMP := $(KERNEL_OUT)/.mkdir_stamp TARGET_KERNEL_ARCH := $(strip $(TARGET_KERNEL_ARCH)) ifeq ($(TARGET_KERNEL_ARCH),) @@ -35,6 +36,7 @@ KERNEL_ARCH := $(TARGET_ARCH) else KERNEL_ARCH := $(TARGET_KERNEL_ARCH) endif +KERNEL_DEFCONFIG_SRC := $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG) TARGET_KERNEL_HEADER_ARCH := $(strip $(TARGET_KERNEL_HEADER_ARCH)) ifeq ($(TARGET_KERNEL_HEADER_ARCH),) @@ -160,6 +162,7 @@ endif ifeq ($(FULL_KERNEL_BUILD),true) KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr +KERNEL_HEADERS_INSTALL_STAMP := $(KERNEL_OUT)/.headers_install_stamp KERNEL_MODULES_INSTALL := system KERNEL_MODULES_OUT := $(TARGET_OUT)/lib/modules @@ -214,11 +217,12 @@ ifeq ($(TARGET_KERNEL_MODULES),) TARGET_KERNEL_MODULES := no-external-modules endif -$(KERNEL_OUT): - mkdir -p $(KERNEL_OUT) - mkdir -p $(KERNEL_MODULES_OUT) 
+$(KERNEL_OUT_STAMP): + $(hide) mkdir -p $(KERNEL_OUT) + $(hide) mkdir -p $(KERNEL_MODULES_OUT) + $(hide) touch $@ -$(KERNEL_CONFIG): $(KERNEL_OUT) +$(KERNEL_CONFIG): $(KERNEL_OUT_STAMP) $(KERNEL_DEFCONFIG_SRC) $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG) $(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ @@ -229,7 +233,7 @@ $(KERNEL_CONFIG): $(KERNEL_OUT) $(KERNEL_SRC)/scripts/kconfig/merge_config.sh -m -O $(KERNEL_OUT) $(KERNEL_OUT)/.config $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG); \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) KCONFIG_ALLCONFIG=$(KERNEL_OUT)/.config alldefconfig; fi -TARGET_KERNEL_BINARIES: $(KERNEL_OUT) $(KERNEL_CONFIG) $(KERNEL_HEADERS_INSTALL) +TARGET_KERNEL_BINARIES: $(KERNEL_OUT_STAMP) $(KERNEL_CONFIG) $(KERNEL_HEADERS_INSTALL_STAMP) @echo -e ${CL_GRN}"Building Kernel"${CL_RST} $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(TARGET_PREBUILT_INT_KERNEL_TYPE) $(hide) if grep -q 'CONFIG_OF=y' $(KERNEL_CONFIG) ; \ @@ -257,7 +261,7 @@ $(TARGET_PREBUILT_INT_KERNEL): $(TARGET_KERNEL_MODULES) $(mv-modules) $(clean-module-folder) -$(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) +$(KERNEL_HEADERS_INSTALL_STAMP): $(KERNEL_OUT_STAMP) $(KERNEL_CONFIG) $(hide) if [ ! 
-z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \ rm -f ../$(KERNEL_CONFIG); \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_HEADER_DEFCONFIG); \ @@ -274,13 +278,17 @@ $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT) $(KERNEL_CONFIG) echo "Using additional config '$(KERNEL_ADDITIONAL_CONFIG)'"; \ $(KERNEL_SRC)/scripts/kconfig/merge_config.sh -m -O $(KERNEL_OUT) $(KERNEL_OUT)/.config $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG); \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) KCONFIG_ALLCONFIG=$(KERNEL_OUT)/.config alldefconfig; fi + $(hide) touch $@ -kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG) +# provide this rule because there are dependencies on this throughout the repo +$(KERNEL_HEADERS_INSTALL): $(KERNEL_HEADERS_INSTALL_STAMP) + +kerneltags: $(KERNEL_OUT_STAMP) $(KERNEL_CONFIG) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) tags kernelconfig: KERNELCONFIG_MODE := menuconfig kernelxconfig: KERNELCONFIG_MODE := xconfig -kernelxconfig kernelconfig: $(KERNEL_OUT) +kernelxconfig kernelconfig: $(KERNEL_OUT_STAMP) $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG) $(hide) if [ ! 
-z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \ @@ -289,9 +297,9 @@ kernelxconfig kernelconfig: $(KERNEL_OUT) $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(KERNELCONFIG_MODE) env KCONFIG_NOTIMESTAMP=true \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) savedefconfig - cp $(KERNEL_OUT)/defconfig $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG) + cp $(KERNEL_OUT)/defconfig $(KERNEL_DEFCONFIG_SRC) -alldefconfig: $(KERNEL_OUT) +alldefconfig: $(KERNEL_OUT_STAMP) env KCONFIG_NOTIMESTAMP=true \ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) alldefconfig From e5cb9a9a2f37ec8f0a7f8819c63f1232148f5d04 Mon Sep 17 00:00:00 2001 From: Michael Bestas Date: Tue, 5 Jan 2016 16:03:45 +0200 Subject: [PATCH 295/309] kernel: Fix usage of KERNEL_ARCH * x86_64 defconfigs live in arch/x86/configs * Use KERNEL_ARCH instead of TARGET_KERNEL_ARCH Change-Id: Idc191ea658fc4100bc7ad40958023c5f936daf44 --- core/tasks/kernel.mk | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk index cc1b5ce1e..763df7376 100644 --- a/core/tasks/kernel.mk +++ b/core/tasks/kernel.mk @@ -36,7 +36,13 @@ KERNEL_ARCH := $(TARGET_ARCH) else KERNEL_ARCH := $(TARGET_KERNEL_ARCH) endif -KERNEL_DEFCONFIG_SRC := $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG) + +ifeq ($(KERNEL_ARCH),x86_64) +KERNEL_DEFCONFIG_ARCH := x86 +else +KERNEL_DEFCONFIG_ARCH := $(KERNEL_ARCH) +endif +KERNEL_DEFCONFIG_SRC := $(KERNEL_SRC)/arch/$(KERNEL_DEFCONFIG_ARCH)/configs/$(KERNEL_DEFCONFIG) TARGET_KERNEL_HEADER_ARCH := $(strip $(TARGET_KERNEL_HEADER_ARCH)) ifeq ($(TARGET_KERNEL_HEADER_ARCH),) @@ -57,7 +63,7 @@ else ifeq ($(TARGET_USES_UNCOMPRESSED_KERNEL),true) TARGET_PREBUILT_INT_KERNEL_TYPE := Image else - ifeq ($(TARGET_KERNEL_ARCH),arm64) + ifeq ($(KERNEL_ARCH),arm64) 
TARGET_PREBUILT_INT_KERNEL_TYPE := Image.gz else TARGET_PREBUILT_INT_KERNEL_TYPE := zImage From 17ee42fcb65271ae4633882ff440872285d83ef1 Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Wed, 18 Nov 2015 16:31:30 -0800 Subject: [PATCH 296/309] MMB29N Change-Id: If01e474aad80918fe3c7f79627967ccbb499caee --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index af1f2f174..6e2bd1747 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. -export BUILD_ID=MMB29M +export BUILD_ID=MMB29N From 40e86a5683be67abf797a8eadcab927d104693bd Mon Sep 17 00:00:00 2001 From: Bart Sears Date: Sat, 14 Nov 2015 23:50:18 -0800 Subject: [PATCH 297/309] Change version back to 6.0 Temporarily set the version back to 6.0 and security patch to 2015-11-01. Change-Id: Idb09d6bab89362b18ae9657e3ed931d614c1894b --- core/version_defaults.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/version_defaults.mk b/core/version_defaults.mk index a67a82ea0..a130799fd 100644 --- a/core/version_defaults.mk +++ b/core/version_defaults.mk @@ -42,7 +42,7 @@ ifeq "" "$(PLATFORM_VERSION)" # which is the version that we reveal to the end user. # Update this value when the platform version changes (rather # than overriding it somewhere else). Can be an arbitrary string. - PLATFORM_VERSION := 6.0.1 + PLATFORM_VERSION := 6.0 endif ifeq "" "$(PLATFORM_SDK_VERSION)" @@ -103,7 +103,7 @@ ifeq "" "$(PLATFORM_SECURITY_PATCH)" # Can be an arbitrary string, but must be a single word. # # If there is no $PLATFORM_SECURITY_PATCH set, keep it empty. 
- PLATFORM_SECURITY_PATCH := 2015-12-01 + PLATFORM_SECURITY_PATCH := 2015-11-01 endif ifeq "" "$(PLATFORM_BASE_OS)" From cf12ff9e8a5141ee76b8362184fbc97138fa95c5 Mon Sep 17 00:00:00 2001 From: Bart Sears Date: Sun, 15 Nov 2015 17:57:13 +0000 Subject: [PATCH 298/309] Revert "Change version back to 6.0" This reverts commit 970f203b3d3f17b778749c3ce47eb6b922aa606c. Change-Id: Icd96990f128e303b0e2af5af0f0d4a0f7731fe60 --- core/version_defaults.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/version_defaults.mk b/core/version_defaults.mk index a130799fd..a67a82ea0 100644 --- a/core/version_defaults.mk +++ b/core/version_defaults.mk @@ -42,7 +42,7 @@ ifeq "" "$(PLATFORM_VERSION)" # which is the version that we reveal to the end user. # Update this value when the platform version changes (rather # than overriding it somewhere else). Can be an arbitrary string. - PLATFORM_VERSION := 6.0 + PLATFORM_VERSION := 6.0.1 endif ifeq "" "$(PLATFORM_SDK_VERSION)" @@ -103,7 +103,7 @@ ifeq "" "$(PLATFORM_SECURITY_PATCH)" # Can be an arbitrary string, but must be a single word. # # If there is no $PLATFORM_SECURITY_PATCH set, keep it empty. - PLATFORM_SECURITY_PATCH := 2015-11-01 + PLATFORM_SECURITY_PATCH := 2015-12-01 endif ifeq "" "$(PLATFORM_BASE_OS)" From 4318d6e77e2affd3f9bc698bf7a02923f8cf800c Mon Sep 17 00:00:00 2001 From: Zach Jang Date: Fri, 20 Nov 2015 14:37:14 -0800 Subject: [PATCH 299/309] Update Security String to 2016-01-01 to mnc-dev http://b/25819582 Change-Id: Ie55f9476110b08591da05774f582a6e48ce12de7 --- core/version_defaults.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/version_defaults.mk b/core/version_defaults.mk index a67a82ea0..29392242d 100644 --- a/core/version_defaults.mk +++ b/core/version_defaults.mk @@ -103,7 +103,7 @@ ifeq "" "$(PLATFORM_SECURITY_PATCH)" # Can be an arbitrary string, but must be a single word. # # If there is no $PLATFORM_SECURITY_PATCH set, keep it empty. 
- PLATFORM_SECURITY_PATCH := 2015-12-01 + PLATFORM_SECURITY_PATCH := 2016-01-01 endif ifeq "" "$(PLATFORM_BASE_OS)" From b4d7f21c4387cb057ef7c05aa17567297cdb576c Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Tue, 1 Dec 2015 17:42:01 -0800 Subject: [PATCH 300/309] "MMB29O" Change-Id: I6510f4960c480a60323c10cf567f10ae9803e6c5 --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index 6e2bd1747..1eb4d737b 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. -export BUILD_ID=MMB29N +export BUILD_ID=MMB29O From f983449dc4af3912931c11d1ca4e04e12759f7f4 Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Tue, 8 Dec 2015 12:35:29 -0800 Subject: [PATCH 301/309] MMN29P Change-Id: Ica945b28e86f8160a1000f443b642f27fcc51f31 --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index 1eb4d737b..3d097a089 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. -export BUILD_ID=MMB29O +export BUILD_ID=MMB29P From 27641b559c72b35e6ec13755c294650c3dfebece Mon Sep 17 00:00:00 2001 From: Zach Jang Date: Wed, 9 Dec 2015 12:46:59 -0800 Subject: [PATCH 302/309] Update Security String to 2016-02-01 b/26110717 Change-Id: I1085f5d053b07c6c81d2ef22fbba5ab9157a67f2 --- core/version_defaults.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/version_defaults.mk b/core/version_defaults.mk index 29392242d..bf9b2e84b 100644 --- a/core/version_defaults.mk +++ b/core/version_defaults.mk @@ -103,7 +103,7 @@ ifeq "" "$(PLATFORM_SECURITY_PATCH)" # Can be an arbitrary string, but must be a single word. # # If there is no $PLATFORM_SECURITY_PATCH set, keep it empty. 
- PLATFORM_SECURITY_PATCH := 2016-01-01 + PLATFORM_SECURITY_PATCH := 2016-02-01 endif ifeq "" "$(PLATFORM_BASE_OS)" From 03366819ea37bdb7166b8ff7958c203f26c41ad3 Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Thu, 10 Dec 2015 16:11:59 -0800 Subject: [PATCH 303/309] "MMB29Q" Change-Id: Ia6f822e71e63e2937bfafb0f6d1994e32241025c --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index 3d097a089..b1746509c 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. -export BUILD_ID=MMB29P +export BUILD_ID=MMB29Q From 78d9b006a5389cc5e3727bdb45bf999d946484f9 Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Fri, 11 Dec 2015 10:27:49 -0800 Subject: [PATCH 304/309] "MMB29R" Change-Id: I85209df8538f80a14bb1cec3c9635925617885cc --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index b1746509c..8376343f4 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. -export BUILD_ID=MMB29Q +export BUILD_ID=MMB29R From eb77f590c02eed880fdcbba54ac896d6a730873e Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Tue, 15 Dec 2015 12:55:01 -0800 Subject: [PATCH 305/309] "MMB76" Change-Id: I193028de6dc562f306eaac9c1e7b41b9298a2518 --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index 8376343f4..f70407feb 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. 
-export BUILD_ID=MMB29R +export BUILD_ID=MMB76 From e248f220f7b2c230d03b68e5f6a2bba1f1b01de8 Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Tue, 15 Dec 2015 13:31:36 -0800 Subject: [PATCH 306/309] "MMB29S" Change-Id: I749187b43f2fac770f699523946985a0997031e4 --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index f70407feb..d0824fb80 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. -export BUILD_ID=MMB76 +export BUILD_ID=MMB29S From 9ab7efe532ccc1d927c00bf91627d26e362ce9f6 Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Wed, 16 Dec 2015 12:07:39 -0800 Subject: [PATCH 307/309] "MMB29T" Change-Id: I593d12abf30efcf76f85d9c63a92fcd2f5208f86 --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index d0824fb80..16709e323 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. -export BUILD_ID=MMB29S +export BUILD_ID=MMB29T From d92eaf3e8ac27df8a3a3f42b3276d65868fafc79 Mon Sep 17 00:00:00 2001 From: The Android Automerger Date: Thu, 17 Dec 2015 13:15:30 -0800 Subject: [PATCH 308/309] "MMB29U" Change-Id: I5e94245015a925044780edb3f6cbdcc7fc5304d2 --- core/build_id.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/build_id.mk b/core/build_id.mk index 16709e323..f55b1842b 100644 --- a/core/build_id.mk +++ b/core/build_id.mk @@ -18,4 +18,4 @@ # (like "CRB01"). It must be a single word, and is # capitalized by convention. 
-export BUILD_ID=MMB29T +export BUILD_ID=MMB29U From 1514a5c3743e3c1f44517b08370eca565bf788fc Mon Sep 17 00:00:00 2001 From: Jake Whatley Date: Fri, 23 Oct 2015 20:12:04 +0300 Subject: [PATCH 309/309] Fix uncompressed ramdisk generation --- core/Makefile | 48 +++++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/core/Makefile b/core/Makefile index 215a3edf9..ae39108f6 100644 --- a/core/Makefile +++ b/core/Makefile @@ -950,10 +950,9 @@ $(RECOVERY_INSTALL_OTA_KEYS): $(OTA_PUBLIC_KEYS) $(DUMPKEY_JAR) $(extra_keys) @mkdir -p $(dir $@) java -jar $(DUMPKEY_JAR) $(PRIVATE_OTA_PUBLIC_KEYS) $(extra_keys) > $@ -RECOVERYIMAGE_ID_FILE := $(PRODUCT_OUT)/recovery.id -# $(1): output file -define build-recoveryimage-target - @echo -e ${CL_CYN}"----- Making recovery image ------"${CL_RST} +define build-recoveryramdisk +@echo -e ${CL_CYN}"----- Making recovery ramdisk ------"${CL_RST} + $(hide) mkdir -p $(TARGET_RECOVERY_OUT) $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/tmp @echo -e ${CL_CYN}"Copying baseline ramdisk..."${CL_RST} @@ -977,24 +976,23 @@ define build-recoveryimage-target $(hide) cp $(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys $(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \ > $(TARGET_RECOVERY_ROOT_OUT)/default.prop - $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk) - $(if $(BOARD_CUSTOM_BOOTIMG_MK),, - $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ - $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ - $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) - $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)),\ - $(BOOT_SIGNER) /recovery $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 
$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)) - $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ - $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)) - $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) - @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST}) endef -$(recovery_uncompressed_ramdisk): $(MINIGZIP) $(recovery_ramdisk) - @echo -e ${CL_CYN}"----- Making uncompressed recovery ramdisk ------"${CL_RST} - $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $@ +RECOVERYIMAGE_ID_FILE := $(PRODUCT_OUT)/recovery.id +# $(1): output file +define build-recoveryimage-target + $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ + $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \ + $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE)) + $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)),\ + $(BOOT_SIGNER) /recovery $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)) + $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \ + $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)) + $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) + @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST} +endef -$(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) \ 
+$(recovery_uncompressed_ramdisk): $(MKBOOTFS) \ $(INSTALLED_RAMDISK_TARGET) \ $(INSTALLED_BOOTIMAGE_TARGET) \ $(INTERNAL_RECOVERYIMAGE_FILES) \ @@ -1003,10 +1001,19 @@ $(recovery_ramdisk): $(MKBOOTFS) $(MINIGZIP) \ $(recovery_build_prop) $(recovery_resource_deps) $(recovery_root_deps) \ $(recovery_fstab) \ $(RECOVERY_INSTALL_OTA_KEYS) + $(call build-recoveryramdisk) + @echo -e ${CL_CYN}"----- Making uncompressed recovery ramdisk ------"${CL_RST} + $(hide) $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $@ + +$(recovery_ramdisk): $(MINIGZIP) \ + $(recovery_uncompressed_ramdisk) + @echo -e ${CL_CYN}"----- Making compressed recovery ramdisk ------"${CL_RST} + $(hide) $(MINIGZIP) < $(recovery_uncompressed_ramdisk) > $@ ifndef BOARD_CUSTOM_BOOTIMG_MK $(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTIMG) $(recovery_ramdisk) $(recovery_kernel) \ $(RECOVERYIMAGE_EXTRA_DEPS) + @echo -e ${CL_CYN}"----- Making recovery image ------"${CL_RST} $(call build-recoveryimage-target, $@) endif # BOARD_CUSTOM_BOOTIMG_MK @@ -1082,6 +1089,9 @@ $(RECOVERY_RESOURCE_ZIP): $(INSTALLED_RECOVERYIMAGE_TARGET) .PHONY: recoveryimage-nodeps recoveryimage-nodeps: @echo "make $@: ignoring dependencies" + $(call build-recoveryramdisk) + $(hide) $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $(recovery_uncompressed_ramdisk) + $(hide) $(MINIGZIP) < $(recovery_uncompressed_ramdisk) > $(recovery_ramdisk) $(call build-recoveryimage-target, $(INSTALLED_RECOVERYIMAGE_TARGET)) else