
scripts: port to Python 3

Signed-off-by: Gerard Marull-Paretas <gerard@teslabs.com>
Gerard Marull-Paretas, 6 months ago
parent
commit
07f359b62b
56 files changed, 341 insertions(+), 328 deletions(-)
  1. .gitignore (+2 -0)
  2. platform/wscript (+1 -1)
  3. python_libs/pblprog/setup.py (+1 -1)
  4. python_libs/pebble-commander/pebble/commander/apps/flash_imaging.py (+8 -8)
  5. python_libs/pebble-commander/pebble/commander/apps/prompt.py (+2 -2)
  6. python_libs/pebble-commander/pebble/commander/apps/streaming_logs.py (+4 -4)
  7. python_libs/pebble-commander/pebble/commander/commander.py (+1 -1)
  8. python_libs/pebble-commander/pebble/commander/util/stm32_crc.py (+8 -8)
  9. python_libs/pulse2/pebble/pulse2/link.py (+2 -2)
  10. python_libs/pulse2/pebble/pulse2/pcmp.py (+1 -1)
  11. python_libs/pulse2/pebble/pulse2/transports.py (+1 -1)
  12. python_libs/pulse2/setup.py (+2 -2)
  13. requirements-linux.txt (+1 -0)
  14. sdk/tools/inject_metadata.py (+4 -4)
  15. sdk/wscript (+1 -1)
  16. src/fw/shell/wscript (+1 -1)
  17. src/fw/vendor/jerryscript/wscript (+3 -3)
  18. src/fw/wscript (+2 -2)
  19. tests/wscript (+6 -6)
  20. tools/applib_malloc.py (+6 -5)
  21. tools/binutils.py (+8 -9)
  22. tools/bitmapgen.py (+22 -23)
  23. tools/clang_compat/clang/cindex.py (+58 -41)
  24. tools/font/fontgen.py (+13 -13)
  25. tools/fw_binary_info.py (+2 -1)
  26. tools/generate_appinfo.py (+2 -2)
  27. tools/generate_c_byte_array.py (+2 -2)
  28. tools/generate_native_sdk/__init__.py (+0 -0)
  29. tools/generate_native_sdk/extract_symbol_info.py (+5 -3)
  30. tools/generate_native_sdk/generate_pebble_native_sdk_files.py (+7 -2)
  31. tools/generate_native_sdk/parse_c_decl.py (+14 -13)
  32. tools/generate_pdcs/json2commands.py (+2 -2)
  33. tools/generate_pdcs/pdc_gen.py (+6 -6)
  34. tools/generate_pdcs/pebble_commands.py (+8 -7)
  35. tools/generate_pdcs/svg2commands.py (+9 -9)
  36. tools/log_hashing/check_elf_log_strings.py (+1 -1)
  37. tools/log_hashing/logdehash.py (+3 -3)
  38. tools/mpu_calc.py (+2 -2)
  39. tools/pbpack.py (+8 -9)
  40. tools/pebble_image_routines.py (+8 -8)
  41. tools/png2pblpng.py (+4 -4)
  42. tools/pulse_console.py (+1 -1)
  43. tools/resources/find_resource_filename.py (+2 -2)
  44. tools/resources/resource_map/resource_generator_vibe.py (+2 -2)
  45. tools/resources/types/resource_ball.py (+1 -1)
  46. tools/stm32_crc.py (+14 -15)
  47. tools/timezones.py (+22 -21)
  48. tools/tool_check.py (+8 -8)
  49. waftools/generate_timezone_data.py (+4 -4)
  50. waftools/gitinfo.py (+1 -1)
  51. waftools/ldscript.py (+2 -2)
  52. waftools/pebble_arm_gcc.py (+6 -6)
  53. waftools/pebble_test.py (+2 -2)
  54. waftools/show_configure.py (+3 -3)
  55. waftools/xcode_pebble.py (+1 -1)
  56. wscript (+31 -46)
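
The hunks below are dominated by a few mechanical Python 2 to 3 substitutions. A minimal reference sketch of the recurring patterns (illustrative values only):

    # Recurring Python 2 -> 3 substitutions applied throughout this commit.
    d = {'a': 1}
    for k, v in d.items():        # was: d.iteritems()
        pass
    for i in range(4):            # was: xrange(4)
        pass
    print("hello")                # was: print "hello"
    half = 7 // 2                 # was: 7 / 2; `/` on ints now yields float
    wire = "text".encode()        # str and bytes are distinct types
    text = wire.decode("utf8")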

+ 2 - 0
.gitignore

@@ -55,3 +55,5 @@ analyze_mcu_flash_usage_treemap.jsonp
 .env
 
 .vscode
+.venv
+*.egg-info

+ 1 - 1
platform/wscript

@@ -202,7 +202,7 @@ def add_platform_defines(ctx, env):
     if 'NRF5' in env.MICRO_FAMILY:
         env.append_value('DEFINES', 'MICRO_FAMILY_NRF5=1')
 
-    for cap, val in ctx.capabilities_dict().iteritems():
+    for cap, val in ctx.capabilities_dict().items():
         env.append_value('DEFINES', "CAPABILITY_%s=%s" % (cap, int(val)))
 
 # Build

+ 1 - 1
python_libs/pblprog/setup.py

@@ -28,7 +28,7 @@ setup(
 
     install_requires=[
         'intelhex>=2.1,<3',
-        'pyftdi==0.10.5'
+        'pyftdi'
     ],
 
     package_data={

+ 8 - 8
python_libs/pebble-commander/pebble/commander/apps/flash_imaging.py

@@ -43,7 +43,7 @@ class EraseCommand(object):
                 self.command_type, self.address, self.length)
 
     def parse_response(self, response):
-        if ord(response[0]) != self.response_type:
+        if response[0] != self.response_type:
             raise exceptions.ResponseParseError(
                     'Unexpected response: %r' % response)
         unpacked = self.Response._make(self.response_struct.unpack(response))
@@ -81,7 +81,7 @@ class WriteResponse(object):
 
     @classmethod
     def parse(cls, response):
-        if ord(response[0]) != cls.response_type:
+        if response[0] != cls.response_type:
             raise exceptions.ResponseParseError(
                     'Unexpected response: %r' % response)
         return cls.Response._make(cls.response_struct.unpack(response))
@@ -106,7 +106,7 @@ class CrcCommand(object):
                                         self.length)
 
     def parse_response(self, response):
-        if ord(response[0]) != self.response_type:
+        if response[0] != self.response_type:
             raise exceptions.ResponseParseError(
                     'Unexpected response: %r' % response)
         unpacked = self.Response._make(self.response_struct.unpack(response))
@@ -140,7 +140,7 @@ class QueryFlashRegionCommand(object):
         return self.command_struct.pack(self.command_type, self.region)
 
     def parse_response(self, response):
-        if ord(response[0]) != self.response_type:
+        if response[0] != self.response_type:
             raise exceptions.ResponseParseError(
                     'Unexpected response: %r' % response)
         unpacked = self.Response._make(self.response_struct.unpack(response))
@@ -165,7 +165,7 @@ class FinalizeFlashRegionCommand(object):
         return self.command_struct.pack(self.command_type, self.region)
 
     def parse_response(self, response):
-        if ord(response[0]) != self.response_type:
+        if response[0] != self.response_type:
             raise exceptions.ResponseParseError(
                     'Unexpected response: %r' % response)
         region, = self.response_struct.unpack(response)
@@ -216,7 +216,7 @@ class FlashImaging(object):
         mtu = self.socket.mtu - WriteCommand.header_len
         assert(mtu > 0)
         unsent = collections.deque()
-        for offset in xrange(0, len(data), mtu):
+        for offset in range(0, len(data), mtu):
             segment = data[offset:offset+mtu]
             assert(len(segment))
             seg_address = address + offset
@@ -266,7 +266,7 @@ class FlashImaging(object):
             to_retry = []
             timeout_time = time.time() - 0.5
             for (seg_address,
-                    (cmd, send_time, retry_count)) in in_flight.iteritems():
+                    (cmd, send_time, retry_count)) in in_flight.copy().items():
                 if send_time > timeout_time:
                     # in_flight is an OrderedDict so iteration is in
                     # chronological order.
@@ -296,7 +296,7 @@ class FlashImaging(object):
         return retries
 
     def _command_and_response(self, cmd, timeout=0.5):
-        for attempt in xrange(5):
+        for attempt in range(5):
             self.socket.send(cmd.packet)
             try:
                 packet = self.socket.receive(timeout=timeout)
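
Two Python 3 details drive this file's changes: indexing a bytes object yields an int, so the ord() calls go away; and dict views reflect concurrent mutation, so the retry loop iterates over a snapshot. A small sketch:

    # Indexing bytes yields int in Python 3 (a 1-char str in Python 2):
    response = b'\x01\x02'
    assert response[0] == 1

    # Deleting entries while iterating a live dict view raises RuntimeError,
    # hence the .copy().items() snapshot:
    in_flight = {'a': 1, 'b': 2}
    for key, value in in_flight.copy().items():
        if value > 1:
            del in_flight[key]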

+ 2 - 2
python_libs/pebble-commander/pebble/commander/apps/prompt.py

@@ -32,7 +32,7 @@ class Prompt(object):
 
     def command_and_response(self, command_string, timeout=20):
         log = []
-        self.socket.send(bytes(command_string))
+        self.socket.send(command_string.encode())
 
         is_done = False
         while not is_done:
@@ -42,7 +42,7 @@ class Prompt(object):
                 if response.is_done_response:
                     is_done = True
                 elif response.is_message_response:
-                    log.append(response.message)
+                    log.append(response.message.decode())
             except pebble.pulse2.exceptions.ReceiveQueueEmpty:
                 raise exceptions.CommandTimedOut
         return log
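
The prompt now crosses the str/bytes boundary explicitly: commands are encoded before they hit the socket, and log lines are decoded on the way out. A sketch of that boundary (names illustrative, not the real socket API):

    command = "ping"
    wire = command.encode()       # str -> bytes for the transport
    assert isinstance(wire, bytes)
    log_line = b"pong".decode()   # bytes -> str for the log
    assert log_line == "pong"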

+ 4 - 4
python_libs/pebble-commander/pebble/commander/apps/streaming_logs.py

@@ -35,12 +35,12 @@ class LogMessage(collections.namedtuple('LogMessage',
     @classmethod
     def parse(cls, packet):
         result = cls.response_struct.unpack(packet[:cls.response_struct.size])
-        msg = packet[cls.response_struct.size:]
+        msg = packet[cls.response_struct.size:].decode("utf8")
 
-        log_level = result[2]
-        task = result[3]
+        log_level = result[2].decode("utf8")
+        task = result[3].decode("utf8")
         timestamp = datetime.fromtimestamp(result[4] / 1000.0)
-        file_name = result[1].split('\x00', 1)[0]  # NUL terminated
+        file_name = result[1].split(b'\x00', 1)[0].decode("utf8")  # NUL terminated
         line_number = result[5]
 
         return cls(log_level, task, timestamp, file_name, line_number, msg)
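
In Python 3, struct 's' fields unpack to bytes, so both the NUL split and the final decode must operate on bytes. A self-contained sketch:

    import struct

    # '8s' pads with NULs; the split and decode mirror the hunk above:
    raw = struct.pack('8s', b'file.c')
    name = raw.split(b'\x00', 1)[0].decode('utf8')
    assert name == 'file.c'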

+ 1 - 1
python_libs/pebble-commander/pebble/commander/commander.py

@@ -111,7 +111,7 @@ class PebbleCommander(object):
                 raise ValueError('function name %s clashes with existing attribute' % funcname)
             fn.is_command = True
             fn.name = cmdname
-            method = types.MethodType(fn, None, cls)
+            method = types.MethodType(fn, cls)
             setattr(cls, funcname, method)
 
             return fn
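
Python 3 removed unbound methods, so the three-argument MethodType form is gone. Note that the two-argument replacement binds the class object itself as self; since plain functions are descriptors in Python 3, direct assignment would reproduce the old per-instance binding. A sketch of the difference (hypothetical names):

    import types

    class C:
        pass

    def greet(self):
        return 'hi'

    C.hello = types.MethodType(greet, C)   # bound method; self is the class C
    assert C.hello() == 'hi'

    C.greet = greet                        # descriptor; self is the instance
    assert C().greet() == 'hi'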

+ 8 - 8
python_libs/pebble-commander/pebble/commander/util/stm32_crc.py

@@ -16,9 +16,9 @@ CRC_POLY = 0x04C11DB7
 
 def precompute_table(bits):
     lookup_table = []
-    for i in xrange(2**bits):
+    for i in range(2**bits):
         rr = i << (32 - bits)
-        for x in xrange(bits):
+        for x in range(bits):
             if rr & 0x80000000:
                 rr = (rr << 1) ^ CRC_POLY
             else:
@@ -31,18 +31,18 @@ lookup_table = precompute_table(8)
 def process_word(data, crc=0xffffffff):
     if (len(data) < 4):
         # The CRC data is "padded" in a very unique and confusing fashion.
-        data = data[::-1] + '\0' * (4 - len(data))
+        data = data[::-1] + b'\0' * (4 - len(data))
 
     for char in reversed(data):
-        b = ord(char)
+        b = char
         crc = ((crc << 8) ^ lookup_table[(crc >> 24) ^ b]) & 0xffffffff
     return crc
 
 def process_buffer(buf, c=0xffffffff):
-    word_count = (len(buf) + 3) / 4
+    word_count = (len(buf) + 3) // 4
 
     crc = c
-    for i in xrange(word_count):
+    for i in range(word_count):
         crc = process_word(buf[i * 4 : (i + 1) * 4], crc)
     return crc
 
@@ -57,9 +57,9 @@ if __name__ == '__main__':
     assert(0x519b130 == process_buffer("\xfe\xff\xfe\xff"))
     assert(0x495e02ca == process_buffer("\xfe\xff\xfe\xff\x88"))
 
-    print "All tests passed!"
+    print("All tests passed!")
 
     if len(sys.argv) >= 2:
         b = open(sys.argv[1]).read()
         crc = crc32(b)
-        print "%u or 0x%x" % (crc, crc)
+        print("%u or 0x%x" % (crc, crc))
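
One caveat with this port: process_word() now iterates bytes, whose items are ints, but the unchanged self-test assertions (and the open() at the bottom) still supply str; under Python 3 those would presumably need bytes literals and 'rb' mode. A minimal illustration:

    data = b"\xfe\xff\xfe\xff"         # not "\xfe\xff\xfe\xff"
    for char in reversed(data):
        assert isinstance(char, int)   # ready for the XOR, no ord() needed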

+ 2 - 2
python_libs/pulse2/pebble/pulse2/link.py

@@ -295,7 +295,7 @@ class Link(object):
         self.mtu = mtu
 
         self.transports = {}
-        for name, factory in self.TRANSPORTS.iteritems():
+        for name, factory in self.TRANSPORTS.items():
             transport = factory(interface, mtu)
             self.transports[name] = transport
 
@@ -310,5 +310,5 @@ class Link(object):
         self.closed = True
         if self.on_close:
             self.on_close()
-        for transport in self.transports.itervalues():
+        for _, transport in self.transports.items():
             transport.down()

+ 1 - 1
python_libs/pulse2/pebble/pulse2/pcmp.py

@@ -54,7 +54,7 @@ class PCMPPacket(collections.namedtuple('PCMPPacket', 'code information')):
         packet = bytes(packet)
         if len(packet) < 1:
             raise ParseError('packet too short')
-        return cls(code=struct.unpack('B', packet[0])[0],
+        return cls(code=struct.unpack('B', packet[0:1])[0],
                    information=packet[1:])
 
     @staticmethod

+ 1 - 1
python_libs/pulse2/pebble/pulse2/transports.py

@@ -557,7 +557,7 @@ class ReliableTransport(object):
             return
 
         # Information packets have the LSBit of the first byte cleared.
-        is_info = (bytearray(packet[0])[0] & 0b1) == 0
+        is_info = (bytearray(packet[0:1])[0] & 0b1) == 0
         try:
             if is_info:
                 fields = ReliableInfoPacket.parse(packet)

+ 2 - 2
python_libs/pulse2/setup.py

@@ -28,8 +28,8 @@ with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
 requires = [
         'cobs',
         'construct>=2.5.3,<2.8',
-        'pyserial>=2.7,<3',
-        'transitions>=0.4.0',
+        'pyserial',
+        'transitions==0.4.1',
 ]
 
 test_requires = []

+ 1 - 0
requirements-linux.txt

@@ -25,6 +25,7 @@ intelhex>=2.1,<3
 protobuf==3.6
 certifi
 libclang
+packaging
 
 # pypi.getpebble.com provided modules
 #pebble.loghash>=2.6

+ 4 - 4
sdk/tools/inject_metadata.py

@@ -94,7 +94,7 @@ def inject_metadata(target_binary, target_elf, resources_file, timestamp, allow_
     def get_nm_output(elf_file):
         nm_process = Popen(['arm-none-eabi-nm', elf_file], stdout=PIPE)
         # Popen.communicate returns a tuple of (stdout, stderr)
-        nm_output = nm_process.communicate()[0]
+        nm_output = nm_process.communicate()[0].decode("utf8")
 
         if not nm_output:
             raise InvalidBinaryError()
@@ -125,7 +125,7 @@ def inject_metadata(target_binary, target_elf, resources_file, timestamp, allow_
 
         readelf_bss_process = Popen("arm-none-eabi-readelf -S '%s'" % elf_file, 
                                     shell=True, stdout=PIPE)
-        readelf_bss_output = readelf_bss_process.communicate()[0]
+        readelf_bss_output = readelf_bss_process.communicate()[0].decode("utf8")
 
         # readelf -S output looks like the following...
         #
@@ -176,7 +176,7 @@ def inject_metadata(target_binary, target_elf, resources_file, timestamp, allow_
 
         # get the .data locations
         readelf_relocs_process = Popen(['arm-none-eabi-readelf', '-r', elf_file], stdout=PIPE)
-        readelf_relocs_output = readelf_relocs_process.communicate()[0]
+        readelf_relocs_output = readelf_relocs_process.communicate()[0].decode("utf8")
         lines = readelf_relocs_output.splitlines()
 
         i = 0
@@ -198,7 +198,7 @@ def inject_metadata(target_binary, target_elf, resources_file, timestamp, allow_
         # get any Global Offset Table (.got) entries
         readelf_relocs_process = Popen(['arm-none-eabi-readelf', '--sections', elf_file],
                                        stdout=PIPE)
-        readelf_relocs_output = readelf_relocs_process.communicate()[0]
+        readelf_relocs_output = readelf_relocs_process.communicate()[0].decode("utf8")
         lines = readelf_relocs_output.splitlines()
         for line in lines:
             # We shouldn't need to do anything with the Procedure Linkage Table since we don't
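
Decoding communicate() output by hand works; an equivalent option on Python 3.7+ is asking subprocess for text directly. A sketch using echo as a stand-in for the arm-none-eabi tools:

    from subprocess import Popen, PIPE

    # text=True (or universal_newlines=True) makes stdout a str:
    p = Popen(['echo', 'hello'], stdout=PIPE, text=True)
    out = p.communicate()[0]
    assert out.strip() == 'hello'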

+ 1 - 1
sdk/wscript

@@ -179,7 +179,7 @@ def build(bld):
         def _collect_check_templates_tasks(dct):
             for key in dct:
                 val = dct[key]
-                if isinstance(val, basestring):
+                if isinstance(val, str):
                     # avoid unicode, it will trip up waf's Node3 and make it 💩 all over the place
                     val = str(val)
                     template_node = defaults_node.find_node(val.split(os.path.sep))

+ 1 - 1
src/fw/shell/wscript

@@ -44,7 +44,7 @@ def generate_apps_table(task):
         f_out.write('#include "resource/resource_ids.auto.h"\n\n')
 
         # zeroed app id's indicate disabled apps
-        enabled_system_apps = filter(lambda e: e['id'] != 0, system_apps)
+        enabled_system_apps = list(filter(lambda e: e['id'] != 0, system_apps))
 
         # write out function declarations for system apps
         for entry in enabled_system_apps:

+ 3 - 3
src/fw/vendor/jerryscript/wscript

@@ -158,7 +158,7 @@ def build(bld):
         try:
             return bld.cmd_and_log(cmd, quiet=Context.BOTH).strip()
         except Errors.WafError as e:
-            print e
+            print(e)
             return "unknown"
 
     # Build jerry_common_config.
@@ -177,7 +177,7 @@ def build(bld):
       'JERRY_ENABLE_ERROR_MESSAGES': '1',
     }
     jerry_common_defines = [
-      '{}={}'.format(k, v) for k, v in jerry_common_defines_dict.iteritems()]
+      '{}={}'.format(k, v) for k, v in jerry_common_defines_dict.items()]
     bld(export_defines=jerry_common_defines, name='jerry_common_config')
 
     # Build jerry_runtime_config.
@@ -188,7 +188,7 @@ def build(bld):
         'JMEM_STATS': '1',
     }
     jerry_common_runtime_defines = [
-        '{}={}'.format(k, v) for k, v in jerry_common_runtime_defines_dict.iteritems()]
+        '{}={}'.format(k, v) for k, v in jerry_common_runtime_defines_dict.items()]
     bld(export_defines=jerry_common_runtime_defines, name='jerry_runtime_config')
 
 

+ 2 - 2
src/fw/wscript

@@ -144,7 +144,7 @@ def _generate_memory_layout(bld):
         with open(bld.path.find_node('bootloader_symbols.json').abspath(), 'r') as f:
             bootloader_symbols_json = json.load(f)
 
-        bootloader_symbols = bootloader_symbols_json['bootloader_symbols'].iteritems()
+        bootloader_symbols = bootloader_symbols_json['bootloader_symbols'].items()
         bootloader_symbol_strings = ("%s = %s;" % (n, v) for n, v in bootloader_symbols)
         bootloader_symbol_definitions = "\n    ".join(bootloader_symbol_strings)
     else:
@@ -171,7 +171,7 @@ def _generate_memory_layout(bld):
     # configuring a segment with a size that is not a multiple of eight will
     # result in segments being smaller than expected. The runtime_reserved
     # size is not checked as its value isn't currently used anywhere.
-    for platform, sizes in APP_RAM_SIZES.iteritems():
+    for platform, sizes in APP_RAM_SIZES.items():
         if sizes.app_segment % 8 != 0:
             bld.fatal("The app_segment size for APP_RAM_SIZES[%r] is not a "
                       "multiple of eight bytes. You're gonna have a bad "

+ 6 - 6
tests/wscript

@@ -16,7 +16,7 @@ def remove_old_coverage_files(bld):
 
 def update_lcov(bld):
     """ Update lcov-related files based on the results of `./waf test`"""
-    print "Generating code coverage information using lcov..."
+    print("Generating code coverage information using lcov...")
     lcov_version_cmd = ['lcov', '--version']
     # Send stdout of checking for lcov to /dev/null to hide it; stderr will still be visible
     with open(os.devnull, 'w') as devnull_fp:
@@ -37,7 +37,7 @@ def update_lcov(bld):
         cmd += platform_specific_lcov_args
         bld.cmd_and_log(cmd, quiet=waflib.Context.BOTH)
     except waflib.Errors.WafError as e:
-        print e.stdout, '\n', e.stderr
+        print(e.stdout, '\n', e.stderr)
         bld.fatal("Error running `lcov`")
     if bld.options.coverage:
         lcov_html_directory = os.path.join(tests_path, 'lcov-html')
@@ -45,10 +45,10 @@ def update_lcov(bld):
         try:
             bld.cmd_and_log(genhtml_cmd, quiet=waflib.Context.BOTH)
         except waflib.Errors.WafError as e:
-            print e.stdout, '\n', e.stderr
+            print(e.stdout, '\n', e.stderr)
             bld.fatal("Error running `genhtml`")
         index_html = os.path.join(lcov_html_directory, 'index.html')
-        print "Updated coverage report at %s" % index_html
+        print("Updated coverage report at %s" % index_html)
 
 def convert_png_to_pbi(task):
     src_png = task.inputs[0].srcpath()
@@ -61,7 +61,7 @@ def convert_png_to_pbi(task):
         img_fmt = 'bw'
     else:
         img_fmt = 'color'  # raw and palettized color images
-        bit_suffix = re.search('(\d)bitpalette\.png', dest_pbi)
+        bit_suffix = re.search(r'(\d)bitpalette\.png', dest_pbi)
         if bit_suffix:
             bitdepth = int(bit_suffix.group(1))
 
@@ -79,7 +79,7 @@ def convert_png_to_pblpng(task):
     else:
         palette_name = 'pebble64'
         bitdepth = None
-        bit_suffix = re.search('(\d)bit(palette)?\.png', dest_png)
+        bit_suffix = re.search(r'(\d)bit(palette)?\.png', dest_png)
 
         if bit_suffix:
             bitdepth = int(bit_suffix.group(1))
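
The added r prefixes matter because '\d' in a plain string literal is an invalid escape that Python 3 flags with a warning (and newer versions will reject outright). For example:

    import re

    m = re.search(r'(\d)bitpalette\.png', 'icon_2bitpalette.png')
    assert m.group(1) == '2'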

+ 6 - 5
tools/applib_malloc.py

@@ -46,7 +46,7 @@ class ApplibType(object):
         self.total_3x_padding = self.size_3x_direct_padding
 
         for d in self.dependencies:
-            parent = filter(lambda t: d == t.name, all_types)[0]
+            parent = next(filter(lambda t: d == t.name, all_types))
             self.total_3x_padding += parent.get_total_3x_padding(all_types)
 
         return self.total_3x_padding
@@ -143,8 +143,9 @@ def dump_sizes(json_filename, elf_filename):
     all_types = get_types(data)
     fmt_str = "%30s %10s %10s %10s %16s %16s %16s  %s"
 
-    print fmt_str % ("Type", "sizeof()", "Size 2.x", "Size 3.x",
-                     "direct padding", "total padding", "calculated size", "dependencies")
+    print(fmt_str % ("Type", "sizeof()", "Size 2.x", "Size 3.x",
+                     "direct padding", "total padding", "calculated size",
+                     "dependencies"))
 
     for t in all_types:
         type_sizeof = _get_sizeof_type(elf_filename, t.name)
@@ -155,9 +156,9 @@ def dump_sizes(json_filename, elf_filename):
         else:
             calculated_size_str = "%u <%u>" % (calculated_size, (calculated_size - t.size_3x))
 
-        print fmt_str % (t.name, type_sizeof, t.size_2x, t.size_3x,
+        print(fmt_str % (t.name, type_sizeof, t.size_2x, t.size_3x,
                          t.size_3x_direct_padding, t.get_total_3x_padding(all_types),
-                         calculated_size_str, t.dependencies)
+                         calculated_size_str, t.dependencies))
 
 
 if __name__ == "__main__":
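
filter() returns a lazy iterator in Python 3, so it can no longer be indexed; next() takes the first match instead of [0]. For example:

    names = ['Window', 'Layer', 'TextLayer']
    first = next(filter(lambda t: t.startswith('Text'), names))
    assert first == 'TextLayer'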

+ 8 - 9
tools/binutils.py

@@ -57,11 +57,11 @@ class FileInfo(object):
         return result
 
     def pprint(self, verbose):
-        print '  %s: size %u' % (self.filename, self.size)
+        print('  %s: size %u' % (self.filename, self.size))
         if verbose:
             l = sorted(self.symbols.itervalues(), key=lambda x: -x.size)
             for s in l:
-                print '    %6u %-36s' % (s.size, s.name)
+                print('    %6u %-36s' % (s.size, s.name))
 
     def __str__(self):
         return '<FileInfo %s: %u>' % (self.filename, self.size)
@@ -95,7 +95,7 @@ class SectionInfo(object):
         return self.files.values()
 
     def pprint(self, summary, verbose):
-        print '%s: count %u size %u' % (self.name, self.count, self.size)
+        print('%s: count %u size %u' % (self.name, self.count, self.size))
 
         if not summary:
             l = self.files.values()
@@ -205,15 +205,14 @@ def _get_symbols_table(f):
 
     addr2line.kill()
 
-    print
     return symbols
 
 
 # This method is quite slow, but works around a bug in nm.
 def _nm_generator_slow(f):
-    print "Getting list of symbols..."
+    print("Getting list of symbols...")
     symbols = _get_symbols_table(f)
-    print "Aggregating..."
+    print("Aggregating...")
     infile = sh.arm_none_eabi_nm('-S', f)
 
     line_pattern = re.compile(r"""([0-9a-f]+)\s+ # address
@@ -285,10 +284,10 @@ def size(elf_path):
     """
     output = subprocess.check_output(["arm-none-eabi-size", elf_path])
 
-    lines = output.splitlines()
+    lines = output.decode("utf8").splitlines()
     if len(lines) < 2:
         return 0
-    match = re.match("^\s*([0-9]+)\s+([0-9]+)\s+([0-9]+)", lines[1])
+    match = re.match(r"^\s*([0-9]+)\s+([0-9]+)\s+([0-9]+)", lines[1])
     if not match:
         return 0
     # text, data, bss
@@ -322,5 +321,5 @@ def section_bytes(elf_path, section_name):
     with tempfile.NamedTemporaryFile() as temp:
         sh.arm_none_eabi_objcopy(['-j', section_name, '-O', 'binary',
                                   elf_path, temp.name])
-        with open(temp.name) as f:
+        with open(temp.name, "rb") as f:
             return f.read()
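
Note that the unchanged verbose branch above still calls self.symbols.itervalues(), which no longer exists on Python 3 dicts and would raise AttributeError if exercised; the Python 3 spelling would be:

    symbols = {'main': 120, 'loop': 48}
    for size in sorted(symbols.values(), reverse=True):
        pass   # .values() replaces .itervalues()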

+ 22 - 23
tools/bitmapgen.py

@@ -14,7 +14,7 @@
 # limitations under the License.
 
 
-import StringIO
+from io import StringIO
 import argparse
 import os
 import struct
@@ -123,7 +123,7 @@ class PebbleBitmap(object):
                 if any(row):
                     break
                 left += 1
-            for row in reversed(alphas_transposed):
+            for row in list(zip(*alphas_transposed))[::-1]:
                 if any(row):
                     break
                 right -= 1
@@ -145,9 +145,9 @@ class PebbleBitmap(object):
             return self.w
         elif self.bitmap_format == FORMAT_COLOR:
             # adds (8 / bitdepth) - 1 to round up (ceil) to the next nearest byte
-            return (self.w + ((8 / self.bitdepth) - 1)) / (8 / self.bitdepth)
+            return (self.w + ((8 // self.bitdepth) - 1)) // (8 // self.bitdepth)
         else:
-            row_size_padded_words = (self.w + 31) / 32
+            row_size_padded_words = (self.w + 31) // 32
             return row_size_padded_words * 4
 
     def info_flags(self):
@@ -180,7 +180,7 @@ class PebbleBitmap(object):
 
         def pack_pixels_to_bitblt_word(pixels, x_offset, x_max):
             word = 0
-            for column in xrange(0, 32):
+            for column in range(0, 32):
                 x = x_offset + column
                 if (x < x_max):
                     pixel = pixels[x]
@@ -190,17 +190,17 @@ class PebbleBitmap(object):
 
         src_pixels = self._im_pixels
         out_pixels = []
-        row_size_words = self.row_size_bytes() / 4
+        row_size_words = self.row_size_bytes() // 4
 
-        for row in xrange(self.y, self.y + self.h):
+        for row in range(self.y, self.y + self.h):
             x_max = self._im_size[0]
-            for column_word in xrange(0, row_size_words):
+            for column_word in range(0, row_size_words):
                 x_offset = self.x + column_word * 32
                 out_pixels.append(pack_pixels_to_bitblt_word(src_pixels[row],
                                                              x_offset,
                                                              x_max))
 
-        return ''.join(out_pixels)
+        return b''.join(out_pixels)
 
     def image_bits_color(self):
         """
@@ -214,10 +214,10 @@ class PebbleBitmap(object):
 
         assert self.bitdepth is not None
         out_pixels = []
-        for row in xrange(self.y, self.y + self.h):
+        for row in range(self.y, self.y + self.h):
             packed_count = 0
             packed_value = 0
-            for column in xrange(self.x, self.x + self.w):
+            for column in range(self.x, self.x + self.w):
                 pixel = self._im_pixels[row][column]
                 r, g, b, a = [pixel[i] for i in range(4)]
 
@@ -260,7 +260,7 @@ class PebbleBitmap(object):
             return self.image_bits_bw()
 
     def header(self):
-        f = StringIO.StringIO()
+        f = StringIO()
         f.write("// GBitmap + pixel data generated by bitmapgen.py:\n\n")
         bytes = self.image_bits()
         bytes_var_name = "s_{var_name}_pixels".format(var_name=self.name)
@@ -283,20 +283,19 @@ class PebbleBitmap(object):
         return to_file
 
     def convert_to_pbi(self):
-        pbi_bits = []
         image_data = self.image_bits()  # compute before generating header
 
-        pbi_bits.extend(self.pbi_header())
-        pbi_bits.extend(image_data)
+        pbi_bits = self.pbi_header()
+        pbi_bits += image_data
         if self.palette and self.bitdepth < 8:
             # write out palette, padded to the bitdepth
-            for i in xrange(0, 2**self.bitdepth):
+            for i in range(0, 2**self.bitdepth):
                 value = 0
                 if i < len(self.palette):
                     value = self.palette[i]
-                pbi_bits.extend(struct.pack('B', value))
+                pbi_bits += struct.pack('B', value)
 
-        return b"".join(pbi_bits)
+        return pbi_bits
 
     def convert_to_pbi_file(self, pbi_file=None):
         to_file = pbi_file if pbi_file else (os.path.splitext(self.path)[0] + '.pbi')
@@ -308,8 +307,8 @@ class PebbleBitmap(object):
 
     def generate_palette(self):
         self.palette = []
-        for row in xrange(self.y, self.y + self.h):
-            for column in xrange(self.x, self.x + self.w):
+        for row in range(self.y, self.y + self.h):
+            for column in range(self.x, self.x + self.w):
                 pixel = self._im_pixels[row][column]
                 r, g, b, a = [pixel[i] for i in range(4)]
 
@@ -347,7 +346,7 @@ def cmd_pbi(args):
 def cmd_header(args):
     pb = PebbleBitmap(args.input_png, bitmap_format=args.format,
                       color_reduction_method=args.color_reduction_method, crop=not args.disable_crop)
-    print pb.header()
+    print(pb.header())
 
 
 def cmd_white_trans_pbi(args):
@@ -382,10 +381,10 @@ def process_all_bitmaps():
     f.close()
 
 def grouper(iterable, n, fillvalue=None):
-    from itertools import izip_longest
+    from itertools import zip_longest
 
     args = [iter(iterable)] * n
-    return izip_longest(fillvalue=fillvalue, *args)
+    return zip_longest(fillvalue=fillvalue, *args)
 
 def process_cmd_line_args():
     parser = argparse.ArgumentParser(description="Generate pebble-usable files from png images")
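
Besides the renames (io.StringIO, zip_longest), the driver here is that zip() is now lazy: a zip object is exhausted after one pass and cannot be indexed or sliced, so transposed pixel rows get materialized with list() before reuse. For example:

    from itertools import zip_longest   # was: izip_longest

    rows = [[1, 2], [3, 4]]
    cols = list(zip(*rows))             # transpose, then index freely
    assert cols[::-1] == [(2, 4), (1, 3)]

    groups = list(zip_longest('ABCDE', 'xy', fillvalue='-'))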

+ 58 - 41
tools/clang_compat/clang/cindex.py

@@ -621,11 +621,10 @@ class BaseEnumeration(object):
     """
 
     def __init__(self, value):
-        if value >= len(self.__class__._kinds):
-            self.__class__._kinds += [None] * (value - len(self.__class__._kinds) + 1)
-        if self.__class__._kinds[value] is not None:
-            raise ValueError('{0} value {1} already loaded'.format(
-                str(self.__class__), value))
+        if value >= len(CursorKind._kinds):
+            CursorKind._kinds += [None] * (value - len(CursorKind._kinds) + 1)
+        if CursorKind._kinds[value] is not None:
+            raise ValueError('CursorKind already loaded')
         self.value = value
         self.__class__._kinds[value] = self
         self.__class__._name_map = None
@@ -644,24 +643,11 @@ class BaseEnumeration(object):
                     self._name_map[value] = key
         return self._name_map[self]
 
-    @classmethod
-    def from_id(cls, id):
-        if id >= len(cls._kinds) or cls._kinds[id] is None:
-            raise ValueError('Unknown template argument kind %d' % id)
-        return cls._kinds[id]
-
-    def __repr__(self):
-        return '%s.%s' % (self.__class__, self.name,)
-
-
-class CursorKind(BaseEnumeration):
-    """
-    A CursorKind describes the kind of entity that a cursor points to.
-    """
-
-    # The required BaseEnumeration declarations.
-    _kinds = []
-    _name_map = None
+    @staticmethod
+    def from_id(id):
+        if id >= len(CursorKind._kinds) or CursorKind._kinds[id] is None:
+            raise ValueError('Unknown cursor kind %d' % id)
+        return CursorKind._kinds[id]
 
     @staticmethod
     def get_all_kinds():
@@ -2019,10 +2005,33 @@ class TypeKind(BaseEnumeration):
     _kinds = []
     _name_map = None
 
+    def __init__(self, value):
+        if value >= len(TypeKind._kinds):
+            TypeKind._kinds += [None] * (value - len(TypeKind._kinds) + 1)
+        if TypeKind._kinds[value] is not None:
+            raise ValueError('TypeKind already loaded')
+        self.value = value
+        TypeKind._kinds[value] = self
+        TypeKind._name_map = None
+
+    def from_param(self):
+        return self.value
+
     @property
-    def spelling(self):
-        """Retrieve the spelling of this TypeKind."""
-        return conf.lib.clang_getTypeKindSpelling(self.value)
+    def name(self):
+        """Get the enumeration name of this cursor kind."""
+        if self._name_map is None:
+            self._name_map = {}
+            for key,value in TypeKind.__dict__.items():
+                if isinstance(value,TypeKind):
+                    self._name_map[value] = key
+        return self._name_map[self]
+
+    @staticmethod
+    def from_id(id):
+        if id >= len(TypeKind._kinds) or TypeKind._kinds[id] is None:
+            raise ValueError('Unknown type kind %d' % id)
+        return TypeKind._kinds[id]
 
     def __repr__(self):
         return 'TypeKind.%s' % (self.name,)
@@ -2999,13 +3008,17 @@ class TranslationUnit(ClangObject):
         unsaved_files_array = 0
         if len(unsaved_files):
             unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
-            for i,(name,contents) in enumerate(unsaved_files):
-                if hasattr(contents, "read"):
-                    contents = contents.read()
-                contents = b(contents)
-                unsaved_files_array[i].name = b(fspath(name))
-                unsaved_files_array[i].contents = contents
-                unsaved_files_array[i].length = len(contents)
+            for i,(name,value) in enumerate(unsaved_files):
+                if not isinstance(value, str):
+                    # FIXME: It would be great to support an efficient version
+                    # of this, one day.
+                    value = value.read()
+                    print(value)
+                if not isinstance(value, str):
+                    raise TypeError('Unexpected unsaved file contents.')
+                unsaved_files_array[i].name = name
+                unsaved_files_array[i].contents = value
+                unsaved_files_array[i].length = len(value)
         ptr = conf.lib.clang_reparseTranslationUnit(self, len(unsaved_files),
                 unsaved_files_array, options)
 
@@ -3059,14 +3072,18 @@ class TranslationUnit(ClangObject):
         unsaved_files_array = 0
         if len(unsaved_files):
             unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
-            for i,(name,contents) in enumerate(unsaved_files):
-                if hasattr(contents, "read"):
-                    contents = contents.read()
-                contents = b(contents)
-                unsaved_files_array[i].name = b(fspath(name))
-                unsaved_files_array[i].contents = contents
-                unsaved_files_array[i].length = len(contents)
-        ptr = conf.lib.clang_codeCompleteAt(self, fspath(path), line, column,
+            for i,(name,value) in enumerate(unsaved_files):
+                if not isinstance(value, str):
+                    # FIXME: It would be great to support an efficient version
+                    # of this, one day.
+                    value = value.read()
+                    print(value)
+                if not isinstance(value, str):
+                    raise TypeError('Unexpected unsaved file contents.')
+                unsaved_files_array[i].name = name
+                unsaved_files_array[i].contents = value
+                unsaved_files_array[i].length = len(value)
+        ptr = conf.lib.clang_codeCompleteAt(self, path, line, column,
                 unsaved_files_array, len(unsaved_files), options)
         if ptr:
             return CodeCompletionResults(ptr)
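
The rewritten unsaved-files loops accept str or a file-like object and reject anything else; the print(value) calls look like leftover debugging. A condensed sketch of the normalization they perform:

    import io

    def normalize(contents):
        if not isinstance(contents, str):
            contents = contents.read()   # file-like objects
        if not isinstance(contents, str):
            raise TypeError('Unexpected unsaved file contents.')
        return contents

    assert normalize('int x;') == 'int x;'
    assert normalize(io.StringIO('int y;')) == 'int y;'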

+ 13 - 13
tools/font/fontgen.py

@@ -103,7 +103,7 @@ MAX_GLYPHS = 256
 def grouper(n, iterable, fillvalue=None):
     """grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
     args = [iter(iterable)] * n
-    return itertools.izip_longest(fillvalue=fillvalue, *args)
+    return itertools.zip_longest(fillvalue=fillvalue, *args)
 
 def hasher(codepoint, num_glyphs):
     return (codepoint % num_glyphs)
@@ -123,7 +123,7 @@ class Font:
         self.legacy = legacy
         self.face = freetype.Face(self.ttf_path)
         self.face.set_pixel_sizes(0, self.max_height)
-        self.name = self.face.family_name + "_" + self.face.style_name
+        self.name = self.face.family_name + b"_" + self.face.style_name
         self.wildcard_codepoint = WILDCARD_CODEPOINT
         self.number_of_glyphs = 0
         self.table_size = HASH_TABLE_SIZE
@@ -166,8 +166,8 @@ class Font:
     def set_regex_filter(self, regex_string):
         if regex_string != ".*":
             try:
-                self.regex = re.compile(unicode(regex_string, 'utf8'), re.UNICODE)
-            except Exception, e:
+                self.regex = re.compile(regex_string)
+            except Exception as e:
                 raise Exception("Supplied filter argument was not a valid regular expression."
                                 "Font: {}".format(self.ttf_path))
         else:
@@ -296,7 +296,7 @@ class Font:
         self.face.load_glyph(gindex, flags)
         # Font metrics
         bitmap = self.face.glyph.bitmap
-        advance = self.face.glyph.advance.x / 64     # Convert 26.6 fixed float format to px
+        advance = self.face.glyph.advance.x // 64     # Convert 26.6 fixed float format to px
         advance += self.tracking_adjust
         width = bitmap.width
         height = bitmap.rows
@@ -337,14 +337,14 @@ class Font:
                     glyph_packed.append(struct.pack('<I', w))
 
                 # Confirm that we're smaller than the cache size
-                size = ((width * height) + (8 - 1)) / 8
+                size = ((width * height) + (8 - 1)) // 8
                 if size > self.max_glyph_size:
                     raise Exception("Glyph too large! codepoint {}: {} > {}. Font {}".
                                     format(codepoint, size, self.max_glyph_size, self.ttf_path))
 
         glyph_header = struct.pack(self.glyph_header, width, height, left, bottom, advance)
 
-        return glyph_header + ''.join(glyph_packed)
+        return glyph_header + b''.join(glyph_packed)
 
     def fontinfo_bits(self):
         if self.version == FONT_VERSION_2:
@@ -388,7 +388,7 @@ class Font:
                         struct.pack(offset_table_format, codepoint, offset))
                 bucket_sizes[glyph_hash] = bucket_sizes[glyph_hash] + 1
                 if bucket_sizes[glyph_hash] > OFFSET_TABLE_MAX_SIZE:
-                    print "error: %d > 127" % bucket_sizes[glyph_hash]
+                    print("error: %d > 127" % bucket_sizes[glyph_hash])
             return bucket_sizes
 
         def add_glyph(codepoint, next_offset, gindex, glyph_indices_lookup):
@@ -410,7 +410,7 @@ class Font:
         def codepoint_is_in_subset(codepoint):
            if (codepoint not in (WILDCARD_CODEPOINT, ELLIPSIS_CODEPOINT)):
               if self.regex is not None:
-                  if self.regex.match(unichr(codepoint)) is None:
+                  if self.regex.match(codepoint.to_bytes(2)) is None:
                       return False
               if codepoint not in self.codepoints:
                  return False
@@ -439,7 +439,7 @@ class Font:
                 raise Exception('Wildcard codepoint is used for something else in this font.'
                                 'Font {}'.format(self.ttf_path))
 
-            if (gindex is 0):
+            if (gindex == 0):
                 raise Exception('0 index is reused by a non wildcard glyph. Font {}'.
                                 format(self.ttf_path))
 
@@ -463,9 +463,9 @@ class Font:
 
     def bitstring(self):
         btstr = self.fontinfo_bits()
-        btstr += ''.join(self.hash_table)
+        btstr += b''.join(self.hash_table)
         for table in self.offset_tables:
-            btstr += ''.join(table)
-        btstr += ''.join(self.glyph_table)
+            btstr += b''.join(table)
+        btstr += b''.join(self.glyph_table)
 
         return btstr
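
Two caveats in this file: freetype exposes family_name/style_name as bytes, hence the b"_" join; and the regex-filter replacement looks fragile, since matching bytes against a str-compiled pattern raises TypeError, and int.to_bytes() needs an explicit byteorder before Python 3.11. chr() is the usual Python 3 replacement for unichr():

    import re

    pattern = re.compile('[A-Za-z]')
    assert pattern.match(chr(0x41)) is not None   # chr gives a 1-char str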

+ 2 - 1
tools/fw_binary_info.py

@@ -17,6 +17,7 @@
 from binascii import crc32
 import os
 import struct
+from functools import reduce
 
 import stm32_crc
 
@@ -103,7 +104,7 @@ class PebbleFirmwareBinaryInfo(object):
 
         # Trim leading NULLS on the strings:
         for k in ["version_tag", "version_short"]:
-            self.info[k] = self.info[k].rstrip("\x00")
+            self.info[k] = self.info[k].rstrip(b"\x00")
 
     def __str__(self):
         return str(self.info)

+ 2 - 2
tools/generate_appinfo.py

@@ -38,7 +38,7 @@ def generate_appinfo_c(app_info, output_filename, platform_name=None):
         app_uuid = uuid.UUID(app_info['uuid'])
     except KeyError:
         raise Exception('Could not find $.uuid in appinfo.json')
-    uuid_initializer_string = '{ %s }' % ", ".join(["0x%02X" % ord(b) for b in app_uuid.bytes])
+    uuid_initializer_string = '{ %s }' % ", ".join(["0x%02X" % b for b in app_uuid.bytes])
 
     try:
         name = app_info['shortName']
@@ -138,7 +138,7 @@ def generate_appinfo_c(app_info, output_filename, platform_name=None):
             company=company_name,
             icon_resource_id=icon_resource_id,
             flags=flags_string,
-            uuid=uuid_initializer_string).encode('utf-8'))
+            uuid=uuid_initializer_string))
 
 PEBBLE_APP_INFO_TEMPLATE = string.Template("""
 const PebbleProcessInfo __pbl_app_info __attribute__ ((section (".pbl_header"))) = {

+ 2 - 2
tools/generate_c_byte_array.py

@@ -15,8 +15,8 @@
 
 def write(output_file, bytes, var_name):
     output_file.write("static const uint8_t {var_name}[] = {{\n  ".format(var_name=var_name))
-    for byte, index in zip(bytes, xrange(0, len(bytes))):
+    for byte, index in zip(bytes, range(0, len(bytes))):
         if index != 0 and index % 16 == 0:
             output_file.write("/* bytes {0} - {1} */\n  ".format(index - 16, index))
-        output_file.write("0x%02x, " % ord(byte))
+        output_file.write("0x%02x, " % byte)
     output_file.write("\n};\n")

+ 0 - 0
tools/generate_native_sdk/__init__.py


+ 5 - 3
tools/generate_native_sdk/extract_symbol_info.py

@@ -16,9 +16,11 @@ import functools
 import os
 import sys
 
-sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
-import parse_c_decl
-from parse_c_decl import clang
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.dirname(SCRIPT_DIR))
+
+from generate_native_sdk import parse_c_decl
+import clang
 
 def extract_exported_functions(node, functions=[], types=[], defines=[]):
     def update_matching_export(exports, node):

+ 7 - 2
tools/generate_native_sdk/generate_pebble_native_sdk_files.py

@@ -21,6 +21,7 @@ import os
 import os.path as path
 import shutil
 import argparse
+from functools import cmp_to_key
 
 from generate_app_header import make_app_header
 from generate_app_shim import make_app_shim_lib
@@ -36,7 +37,7 @@ import exports
 # need to append the parent directory to the system PATH because relative imports won't work
 try:
     from ..pebble_sdk_platform import pebble_platforms, maybe_import_internal
-except ValueError:
+except ImportError:
     os.sys.path.append(path.dirname(path.dirname(__file__)))
     from pebble_sdk_platform import pebble_platforms, maybe_import_internal
 
@@ -117,12 +118,16 @@ Hint: Add appropriate headers to the \"files\" array in exported_symbols.json"""
 
 
     def function_export_compare_func(x, y):
+        def cmp(a, b):
+            return (a > b) - (a < b)
+
         if (x.added_revision != y.added_revision):
             return cmp(x.added_revision, y.added_revision)
 
         return cmp(x.sort_name, y.sort_name)
 
-    sorted_functions = sorted(functions, cmp=function_export_compare_func)
+    sorted_functions = sorted(functions,
+                              key=cmp_to_key(function_export_compare_func))
 
     # Build libpebble.a for our apps to compile against
     make_app_shim_lib(sorted_functions, sdk_lib_dir)
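
sorted() lost its cmp= argument in Python 3; functools.cmp_to_key adapts an old-style comparator, and the local cmp() shim replaces the removed builtin. A plain key tuple expresses the same two-level ordering more directly:

    from functools import cmp_to_key

    def compare(x, y):
        def cmp(a, b):
            return (a > b) - (a < b)
        return cmp(x[1], y[1]) or cmp(x[0], y[0])

    items = [('b', 2), ('a', 2), ('c', 1)]
    assert sorted(items, key=cmp_to_key(compare)) == \
           sorted(items, key=lambda x: (x[1], x[0]))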

+ 14 - 13
tools/parse_c_decl.py → tools/generate_native_sdk/parse_c_decl.py

@@ -41,12 +41,12 @@ def get_homebrew_llvm_lib_path():
         return None
 
     # Brittleness alert! Grepping output of `brew info llvm` for llvm bin path:
-    m = re.search('.*/llvm-config', o)
+    m = re.search('.*/llvm-config', o.decode("utf8"))
     if m:
         llvm_config_path = m.group(0)
 
         o = subprocess.check_output([llvm_config_path, '--libdir'])
-        llvm_lib_path = o.strip()
+        llvm_lib_path = o.decode("utf8").strip()
 
         # Make sure --enable-clang and --enable-python options were used:
         if os.path.exists(os.path.join(llvm_lib_path, 'libclang.dylib')) and \
@@ -75,14 +75,14 @@ def load_library():
             logging.info("llvm from homebrew not found,"
                          " trying Xcode's instead")
             xcode_path = subprocess.check_output(['xcode-select',
-                                                  '--print-path']).strip()
+                                                  '--print-path']).decode("utf8").strip()
             libclang_path = \
                 os.path.join(xcode_path,
                              'Toolchains/XcodeDefault.xctoolchain/usr/lib')
         clang.cindex.conf.set_library_path(libclang_path)
     elif sys.platform == 'linux2':
         libclang_path = subprocess.check_output(['llvm-config',
-                                                 '--libdir']).strip()
+                                                 '--libdir']).decode("utf8").strip()
         clang.cindex.conf.set_library_path(libclang_path)
 
     libclang_lib = clang.cindex.conf.lib
@@ -145,18 +145,19 @@ def get_string_from_file(source_range):
     if source_range_file is None:
         return None
 
-    with open(source_range_file.name) as f:
+    with open(source_range_file.name, "rb") as f:
         f.seek(source_range.start.offset)
-        return f.read(source_range.end.offset - source_range.start.offset)
+        return f.read(source_range.end.offset -
+                      source_range.start.offset).decode("utf8")
 
 def dump_node(node, indent_level=0):
     spelling = node.spelling
     if node.kind == clang.cindex.CursorKind.MACRO_DEFINITION:
         spelling = get_node_spelling(node)
 
-    print "%*s%s> %s" % (indent_level * 2, "", node.kind, spelling)
-    print "%*sRange:   %s" % (4 + (indent_level * 2), "", str(node.extent))
-    print "%*sComment: %s" % (4 + (indent_level * 2), "", str(get_comment_range_for_decl(node)))
+    print("%*s%s> %s" % (indent_level * 2, "", node.kind, spelling))
+    print("%*sRange:   %s" % (4 + (indent_level * 2), "", str(node.extent)))
+    print("%*sComment: %s" % (4 + (indent_level * 2), "", str(get_comment_range_for_decl(node))))
 
 def return_true(node):
     return True
@@ -194,7 +195,7 @@ def extract_declarations(tu, filenames, func):
 
 
 def parse_file(filename, filenames, func, internal_sdk_build=False, compiler_flags=None):
-    src_dir = os.path.join(os.path.dirname(__file__), "../src")
+    src_dir = os.path.join(os.path.dirname(__file__), "../../src")
 
     args = [ "-I%s/core" % src_dir,
              "-I%s/include" % src_dir,
@@ -228,7 +229,7 @@ def parse_file(filename, filenames, func, internal_sdk_build=False, compiler_fla
     # this workaround should be removed when fixed in newlib
     cmd = ['clang'] + ['-dM', '-E', '-']
     try:
-        out = subprocess.check_output(cmd, stdin=open('/dev/null')).strip()
+        out = subprocess.check_output(cmd, stdin=open('/dev/null')).decode("utf8").strip()
         if not isinstance(out, str):
             out = out.decode(sys.stdout.encoding or 'iso8859-1')
     except Exception as err:
@@ -247,14 +248,14 @@ def parse_file(filename, filenames, func, internal_sdk_build=False, compiler_fla
     args.insert(0, r"-D_TIME_H_")
 
     # Try and find our arm toolchain and use the headers from that.
-    gcc_path = subprocess.check_output(['which', 'arm-none-eabi-gcc']).strip()
+    gcc_path = subprocess.check_output(['which', 'arm-none-eabi-gcc']).decode("utf8").strip()
     include_path = os.path.join(os.path.dirname(gcc_path), '../arm-none-eabi/include')
     args.append("-I%s" % include_path)
 
     # Find the arm-none-eabi-gcc libgcc path including stdbool.h
     cmd = ['arm-none-eabi-gcc'] + ['-E', '-v', '-xc', '-']
     try:
-        out = subprocess.check_output(cmd, stdin=open('/dev/null'), stderr=subprocess.STDOUT).strip().splitlines()
+        out = subprocess.check_output(cmd, stdin=open('/dev/null'), stderr=subprocess.STDOUT).decode("utf8").strip().splitlines()
         if '#include <...> search starts here:' in out:
             libgcc_include_path = out[out.index('#include <...> search starts here:') + 1].strip()
             args.append("-I%s" % libgcc_include_path)
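
One leftover: the unchanged elif above still tests sys.platform == 'linux2', but Python 3.3+ reports 'linux', so that branch appears dead after this port. A startswith check covers both:

    import sys

    if sys.platform.startswith('linux'):   # matches 'linux' and 'linux2'
        pass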

+ 2 - 2
tools/generate_pdcs/json2commands.py

@@ -29,9 +29,9 @@ The fill color is currently defaulted to solid white.
 
 import os
 import argparse
-import pebble_commands
+from . import pebble_commands
 import json
-import graph
+from . import graph
 from itertools import groupby
 
 INVISIBLE_POINT_THRESHOLD = 500
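
The import changes here follow PEP 328: Python 3 dropped implicit relative imports, so a module inside a package must name its siblings explicitly (from . import sibling). A throwaway package demonstrating the rule:

    import os, sys, tempfile

    root = tempfile.mkdtemp()
    pkg = os.path.join(root, 'pkg')
    os.mkdir(pkg)
    open(os.path.join(pkg, '__init__.py'), 'w').close()
    with open(os.path.join(pkg, 'sibling.py'), 'w') as f:
        f.write('VALUE = 42\n')
    with open(os.path.join(pkg, 'user.py'), 'w') as f:
        f.write('from . import sibling\n')   # 'import sibling' would fail

    sys.path.insert(0, root)
    import pkg.user
    assert pkg.user.sibling.VALUE == 42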

+ 6 - 6
tools/generate_pdcs/pdc_gen.py

@@ -23,9 +23,9 @@ Size (4 bytes) - size of PDC image or sequence following the header in bytes
 import os
 import argparse
 
-import pebble_commands
-import svg2commands
-import json2commands
+from . import pebble_commands
+from . import svg2commands
+from . import json2commands
 
 
 def create_pdc_data_from_path(path, viewbox_size, verbose, duration, play_count,
@@ -37,7 +37,7 @@ def create_pdc_data_from_path(path, viewbox_size, verbose, duration, play_count,
         raise Exception("Invalid path")
 
     if verbose:
-        print path + ":"
+        print(path + ":")
     if os.path.isfile(path):
         dir_name = os.path.dirname(path)
     frames = []
@@ -108,9 +108,9 @@ def main(args):
     errors = create_pdc_from_path(path, args.output, viewbox_size, args.verbose, args.duration,
                                   args.play_count, args.precise)
     if errors:
-        print "Errors in the following files or frames:"
+        print("Errors in the following files or frames:")
         for ef in errors:
-            print "\t" + str(ef)
+            print("\t" + str(ef))
 
 
 if __name__ == '__main__':

+ 8 - 7
tools/generate_pdcs/pebble_commands.py

@@ -79,8 +79,9 @@ def convert_to_pebble_coordinates(point, verbose=False, precise=False):
 
     valid = compare_points(point, nearest)
     if not valid and verbose:
-        print "Invalid point: ({}, {}). Closest supported coordinate: ({}, {})".format(point[0], point[1],
-                                                                                       nearest[0], nearest[1])
+        print("Invalid point: ({}, {}). Closest supported coordinate: ({}, {})".format(point[0], point[1],
+                                                                                       nearest[0],
+                                                                                       nearest[1]))
 
     translated = sum_points(point, (-0.5, -0.5))   # translate point by (-0.5, -0.5)
     if precise:
@@ -103,7 +104,7 @@ def convert_color(r, g, b, a, truncate=True):
 
     valid = valid_color(r, g, b, a)
     if not valid:
-        print "Invalid color: ({}, {}, {}, {})".format(r, g, b, a)
+        print("Invalid color: ({}, {}, {}, {})".format(r, g, b, a))
         return 0
 
     if truncate:
@@ -243,12 +244,12 @@ def serialize(commands):
 
 def print_commands(commands):
     for c in commands:
-        print str(c)
+        print(str(c))
 
 
 def print_frames(frames):
     for i in range(len(frames)):
-        print 'Frame {}:'.format(i + 1)
+        print('Frame {}:'.format(i + 1))
         print_commands(frames[i])
 
 
@@ -265,7 +266,7 @@ def serialize_sequence(frames, size, duration, play_count):
     for f in frames:
         s += serialize_frame(f, duration)
 
-    output = "PDCS"
+    output = b"PDCS"
     output += pack('I', len(s))
     output += s
     return output
@@ -275,7 +276,7 @@ def serialize_image(commands, size):
     s = pack_header(size)
     s += serialize(commands)
 
-    output = "PDCI"
+    output = b"PDCI"
     output += pack('I', len(s))
     output += s
     return output
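
Serialized PDC output is now bytes end-to-end, so the magic headers take the b prefix to concatenate with struct.pack() output. For example:

    from struct import pack

    payload = pack('I', 3)
    blob = b"PDCI" + pack('I', len(payload)) + payload
    assert blob[:4] == b"PDCI"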

+ 9 - 9
tools/generate_pdcs/svg2commands.py

@@ -24,7 +24,7 @@ g, layer, path, rect, polyline, polygon, line, circle,
 import xml.etree.ElementTree as ET
 import svg.path
 import glob
-import pebble_commands
+from . import pebble_commands
 
 xmlns = '{http://www.w3.org/2000/svg}'
 
@@ -42,14 +42,14 @@ def get_translate(group):
     if trans is not None:
         pos = trans.find('translate')
         if pos < 0:
-            print "No translation in translate"
+            print("No translation in translate")
             return 0, 0
 
         import ast
         try:
             return ast.literal_eval(trans[pos + len('translate'):])
         except (ValueError, TypeError):
-            print "translate contains unsupported elements in addition to translation"
+            print("translate contains unsupported elements in addition to translation")
 
     return 0, 0
 
@@ -97,7 +97,7 @@ def parse_path(element, translate, stroke_width, stroke_color, fill_color, verbo
         path = svg.path.parse_path(d)
         points = [(lambda l: (l.real, l.imag))(line.start) for line in path]
         if not points:
-            print "No points in parsed path"
+            print("No points in parsed path")
             return None
 
         path_open = path[-1].end != path[0].start
@@ -112,7 +112,7 @@ def parse_path(element, translate, stroke_width, stroke_color, fill_color, verbo
         return pebble_commands.PathCommand(points, path_open, translate, stroke_width, stroke_color,
                                            fill_color, verbose, precise, raise_error)
     else:
-        print "Path element does not have path attribute"
+        print("Path element does not have path attribute")
 
 
 def parse_circle(element, translate, stroke_width, stroke_color, fill_color, verbose, precise,
@@ -129,9 +129,9 @@ def parse_circle(element, translate, stroke_width, stroke_color, fill_color, ver
             return pebble_commands.CircleCommand(center, radius, translate, stroke_width,
                                                  stroke_color, fill_color, verbose)
         except ValueError:
-            print "Unrecognized circle format"
+            print("Unrecognized circle format")
     else:
-        print "Unrecognized circle format"
+        print("Unrecognized circle format")
 
 
 def parse_polyline(element, translate, stroke_width, stroke_color, fill_color, verbose, precise,
@@ -220,7 +220,7 @@ def create_command(translate, element, verbose=False, precise=False, raise_error
                                        verbose, precise, raise_error)
     except KeyError:
         if tag != 'g' and tag != 'layer':
-            print "Unsupported element: " + tag
+            print("Unsupported element: " + tag)
 
     return None
 
@@ -229,7 +229,7 @@ def get_commands(translate, group, verbose=False, precise=False, raise_error=Fal
                  truncate_color=True):
     commands = []
     error = False
-    for child in group.getchildren():
+    for child in list(group):
         # ignore elements that are marked display="none"
         display = child.get('display')
         if display is not None and display == 'none':
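
Element.getchildren() was deprecated and finally removed in Python 3.9; iterating the element (or list()-ing it, as above) replaces it:

    import xml.etree.ElementTree as ET

    root = ET.fromstring('<g><path/><circle/></g>')
    assert [child.tag for child in list(root)] == ['path', 'circle']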

+ 1 - 1
tools/log_hashing/check_elf_log_strings.py

@@ -147,5 +147,5 @@ if __name__ == '__main__':
     output = check_elf_log_strings(args.elf_path)
 
     if output:
-        print output
+        print(output)
         sys.exit(1)

+ 3 - 3
tools/log_hashing/logdehash.py

@@ -23,7 +23,7 @@ from datetime import datetime
 import unicodedata as ud
 
 from pebble.loghashing import newlogging
-from newlogging import get_log_dict_from_file
+from .newlogging import get_log_dict_from_file
 
 LOG_DICT_KEY_CORE_ID = 'core_'
 
@@ -105,10 +105,10 @@ class LogDehash(object):
         if not self.loghash_dict:
             return
 
-        print 'Supported Cores:'
+        print('Supported Cores:')
         for key in sorted(self.loghash_dict, key=self.loghash_dict.get):
             if key.startswith(LOG_DICT_KEY_CORE_ID):
-                print '    {}: {}'.format(key, self.loghash_dict[key])
+                print('    {}: {}'.format(key, self.loghash_dict[key]))
 
     def update_log_string_metrics(self):
         if not self.loghash_dict:

+ 2 - 2
tools/mpu_calc.py

@@ -80,8 +80,8 @@ def find_subregions_for_region(address, size):
 
             block_start_addresss = address - start_in_block
 
-            start_enabled_subregion = start_in_block / subregion_size
-            end_enabled_subregion = end_in_block / subregion_size
+            start_enabled_subregion = start_in_block // subregion_size
+            end_enabled_subregion = end_in_block // subregion_size
 
             disabled_subregions = bitarray(8, endian='little')
             disabled_subregions.setall(True)

+ 8 - 9
tools/pbpack.py

@@ -79,12 +79,12 @@ class ResourcePack(object):
     def serialize_table(self):
         # Serialize these entries into table_data
         cur_file_id = 1
-        table_data = ''
+        table_data = b''
         for cur_file_id, table_entry in enumerate(self.table_entries, start=1):
             table_data += table_entry.serialize(cur_file_id)
 
         # Pad the rest of the table_data up to table_size
-        for i in xrange(cur_file_id, self.table_size):
+        for i in range(cur_file_id, self.table_size):
             table_data += ResourcePackTableEntry(0, 0, 0, 0).serialize(0)
 
         return table_data
@@ -119,7 +119,7 @@ class ResourcePack(object):
 
         # Parse table entries:
         resource_pack.table_entries = []
-        for n in xrange(num_files):
+        for n in range(num_files):
             table_entry = f_in.read(cls.TABLE_ENTRY_SIZE_BYTES)
             file_id, entry = ResourcePackTableEntry.deserialize(table_entry)
 
@@ -214,8 +214,7 @@ class ResourcePack(object):
 
         f_out.write(self.serialize_manifest(self.crc))
         f_out.write(self.serialize_table())
-        for c in self.serialize_content():
-            f_out.write(c)
+        f_out.write(self.serialize_content())
 
         return self.crc
 
@@ -244,11 +243,11 @@ class ResourcePack(object):
         Dump a bunch of information about this pbpack to stdout
         """
 
-        print 'Manifest CRC: 0x%x' % self.crc
-        print 'Calculated CRC: 0x%x' % self.get_content_crc()
-        print 'Num Items: %u' % len(self.table_entries)
+        print('Manifest CRC: 0x%x' % self.crc)
+        print('Calculated CRC: 0x%x' % self.get_content_crc())
+        print('Num Items: %u' % len(self.table_entries))
         for i, entry in enumerate(self.table_entries, start=1):
-            print '  %u: Offset %u Length %u CRC 0x%x' % (i, entry.offset, entry.length, entry.crc)
+            print('  %u: Offset %u Length %u CRC 0x%x' % (i, entry.offset, entry.length, entry.crc))
 
     def __init__(self, is_system):
         self.table_size = 512 if is_system else 256

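Porting note: the table serializer accumulates its output by concatenation, so the seed has to become b'' — mixing str and bytes is a TypeError in Python 3. A minimal sketch of the pattern (the field layout here is illustrative, not the real ResourcePackTableEntry format):

    import struct

    table_data = b''                       # was '' under Python 2
    for offset, length, crc in [(0, 16, 0xdead), (16, 8, 0xbeef)]:
        table_data += struct.pack('<III', offset, length, crc)
    assert isinstance(table_data, bytes) and len(table_data) == 24
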
+ 8 - 8
tools/pebble_image_routines.py

@@ -42,15 +42,15 @@ def nearest_color_to_pebble64_palette(r, g, b, a):
     returns closest rgba32 color triplet (r, g, b, a)
     """
 
-    a = ((a + 42) / 85) * 85  # fast nearest alpha for 2bit color range
+    a = ((a + 42) // 85) * 85  # fast nearest alpha for 2bit color range
     # clear transparent pixels (makes image more compress-able)
     # and required for greyscale tests
     if a == 0:
         r, g, b = (0, 0, 0)
     else:
-        r = ((r + 42) / 85) * 85  # nearest for 2bit color range
-        g = ((g + 42) / 85) * 85  # nearest for 2bit color range
-        b = ((b + 42) / 85) * 85  # nearest for 2bit color range
+        r = ((r + 42) // 85) * 85  # nearest for 2bit color range
+        g = ((g + 42) // 85) * 85  # nearest for 2bit color range
+        b = ((b + 42) // 85) * 85  # nearest for 2bit color range
 
     return r, g, b, a
 
@@ -81,15 +81,15 @@ def truncate_color_to_pebble64_palette(r, g, b, a):
     returns the truncated color as a rgba32 color triplet (r, g, b, a)
     """
 
-    a = (a / 85) * 85  # truncate alpha for 2bit color range
+    a = (a // 85) * 85  # truncate alpha for 2bit color range
     # clear transparent pixels (makes image more compress-able)
     # and required for greyscale tests
     if a == 0:
         r, g, b = (0, 0, 0)
     else:
-        r = (r / 85) * 85  # truncate for 2bit color range
-        g = (g / 85) * 85  # truncate for 2bit color range
-        b = (b / 85) * 85  # truncate for 2bit color range
+        r = (r // 85) * 85  # truncate for 2bit color range
+        g = (g // 85) * 85  # truncate for 2bit color range
+        b = (b // 85) * 85  # truncate for 2bit color range
 
     return r, g, b, a
 

+ 4 - 4
tools/png2pblpng.py

@@ -16,7 +16,7 @@
 
 import png
 import itertools
-import StringIO
+from io import BytesIO
 
 import pebble_image_routines
 
@@ -52,7 +52,7 @@ def convert_png_to_pebble_png_bytes(input_filename, palette_name,
     output_png, image_data = _convert_png_to_pebble_png_writer(
         input_filename, palette_name, color_reduction_method, force_bitdepth=bitdepth)
 
-    output_str = StringIO.StringIO()
+    output_str = BytesIO()
     output_png.write_array(output_str, image_data)
 
     return output_str.getvalue()
@@ -211,10 +211,10 @@ def get_palette_for_png(input_filename, palette_name, color_reduction_method):
 
 
 def grouper(iterable, n, fillvalue=None):
-    from itertools import izip_longest
+    from itertools import zip_longest
 
     args = [iter(iterable)] * n
-    return izip_longest(fillvalue=fillvalue, *args)
+    return zip_longest(*args, fillvalue=fillvalue)
 
 
 def get_ideal_palette(is_color=False):

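Porting note: Python 2's StringIO.StringIO happily held binary data, but Python 3 splits the types, and PNG output is bytes, so io.BytesIO is the right in-memory buffer. The write-then-collect pattern, sketched with a stand-in payload:

    from io import BytesIO

    output_str = BytesIO()
    output_str.write(b"\x89PNG\r\n\x1a\n")     # stand-in for the png writer's output
    assert output_str.getvalue().startswith(b"\x89PNG")
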
+ 1 - 1
tools/pulse_console.py

@@ -33,7 +33,7 @@ def erase_current_line():
 
 
 def handle_prompt_command(interface):
-    cmd = raw_input(PROMPT_STRING)
+    cmd = input(PROMPT_STRING)
     if not cmd:
         return
 

+ 2 - 2
tools/resources/find_resource_filename.py

@@ -58,8 +58,8 @@ def find_most_specific_filename(bld, env, root_node, general_filename):
     if len(specificities) == 0:
         return general_filename
 
-    top_score = max(specificities.itervalues())
-    top_candidates = [k for k, v in specificities.iteritems() if v == top_score]
+    top_score = max(specificities.values())
+    top_candidates = [k for k, v in specificities.items() if v == top_score]
     if len(top_candidates) > 1:
         bld.fatal("The correct file for {general} on {platform} is ambiguous: {count} files have "
                   "specificity {score}:\n\t{files}".format(general=general_filename,

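Porting note: the iter* dict methods are gone in Python 3; .values() and .items() return lazy views, so for a single pass like this the swap is mechanical. With illustrative filenames:

    specificities = {'a.png': 1, 'a~color.png': 2, 'a~rect.png': 2}
    top_score = max(specificities.values())
    top_candidates = [k for k, v in specificities.items() if v == top_score]
    assert sorted(top_candidates) == ['a~color.png', 'a~rect.png']
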
+ 2 - 2
tools/resources/resource_map/resource_generator_vibe.py

@@ -19,7 +19,7 @@ from pebble_sdk_platform import pebble_platforms
 
 import json2vibe
 
-import StringIO
+from io import BytesIO
 
 
 class VibeResourceGenerator(ResourceGenerator):
@@ -27,7 +27,7 @@ class VibeResourceGenerator(ResourceGenerator):
 
     @staticmethod
     def generate_object(task, definition):
-        out = StringIO.StringIO()
+        out = BytesIO()
         json2vibe.convert_to_file(task.inputs[0].abspath(), out)
 
         return ResourceObject(definition, out.getvalue())

+ 1 - 1
tools/resources/types/resource_ball.py

@@ -52,4 +52,4 @@ if __name__ == '__main__':
     rb = ResourceBall.load(args.resball)
 
     for i, o in enumerate(rb.resource_objects, start=1):
-        print "%4u: %-50s %-10s %6u" % (i, o.definition.name, o.definition.type, len(o.data))
+        print("%4u: %-50s %-10s %6u" % (i, o.definition.name, o.definition.type, len(o.data)))

+ 14 - 15
tools/stm32_crc.py

@@ -16,9 +16,9 @@ CRC_POLY = 0x04C11DB7
 
 def precompute_table(bits):
     lookup_table = []
-    for i in xrange(2**bits):
+    for i in range(2**bits):
         rr = i << (32 - bits)
-        for x in xrange(bits):
+        for x in range(bits):
             if rr & 0x80000000:
                 rr = (rr << 1) ^ CRC_POLY
             else:
@@ -31,18 +31,17 @@ lookup_table = precompute_table(8)
 def process_word(data, crc=0xffffffff):
     if (len(data) < 4):
         # The CRC data is "padded" in a very unique and confusing fashion.
-        data = data[::-1] + '\0' * (4 - len(data))
+        data = data[::-1] + b'\0' * (4 - len(data))
 
-    for char in reversed(data):
-        b = ord(char)
+    for b in reversed(data):
         crc = ((crc << 8) ^ lookup_table[(crc >> 24) ^ b]) & 0xffffffff
     return crc
 
 def process_buffer(buf, c=0xffffffff):
-    word_count = (len(buf) + 3) / 4
+    word_count = (len(buf) + 3) // 4
 
     crc = c
-    for i in xrange(word_count):
+    for i in range(word_count):
         crc = process_word(buf[i * 4 : (i + 1) * 4], crc)
     return crc
 
@@ -52,19 +51,19 @@ def crc32(data):
 if __name__ == '__main__':
     import sys
 
-    assert(0x89f3bab2 == process_buffer("123 567 901 34"))
-    assert(0xaff19057 == process_buffer("123456789"))
-    assert(0x519b130 == process_buffer("\xfe\xff\xfe\xff"))
-    assert(0x495e02ca == process_buffer("\xfe\xff\xfe\xff\x88"))
+    assert(0x89f3bab2 == process_buffer(b"123 567 901 34"))
+    assert(0xaff19057 == process_buffer(b"123456789"))
+    assert(0x519b130 == process_buffer(b"\xfe\xff\xfe\xff"))
+    assert(0x495e02ca == process_buffer(b"\xfe\xff\xfe\xff\x88"))
 
-    print "All tests passed!"
+    print("All tests passed!")
 
     # arg1 == path to file to crc
     # arg2 == only crc first N bytes of file specified in arg 1
     if len(sys.argv) >= 2:
         if len(sys.argv) >= 3:
-            b = open(sys.argv[1]).read(int(sys.argv[2]))
+            b = open(sys.argv[1], "rb").read(int(sys.argv[2]))
         else:
-            b = open(sys.argv[1]).read()
+            b = open(sys.argv[1], "rb").read()
         crc = crc32(b)
-        print "%u or 0x%x" % (crc, crc)
+        print("%u or 0x%x" % (crc, crc))

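Porting note: iterating (or reversing) a bytes object yields ints in Python 3, which is why the ord() call disappears and the padding must be a bytes literal. A quick check of the property the loop now relies on:

    data = b"\xfe\xff"
    padded = data[::-1] + b"\0" * (4 - len(data))
    for b in reversed(padded):
        assert isinstance(b, int)          # Python 2 yielded 1-char strings here
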
+ 22 - 21
tools/timezones.py

@@ -23,6 +23,7 @@ import string
 import struct
 import sys
 from collections import namedtuple
+from functools import cmp_to_key
 
 # dst-rules are formatted in 2 forms
 # Jordan Oct lastFri 0:00 -
@@ -143,13 +144,13 @@ def dstrules_parse(tzfile):
     wday_dict = {'Sun': 0, 'Mon': 1, 'Tue': 2, 'Wed': 3, 'Thu': 4, 'Fri': 5, 'Sat': 6,
                  'Any': 255}
 
-    with open(tzfile, 'rb') as infile:
+    with open(tzfile, 'r') as infile:
         lines = infile.readlines()
         for line_num, line in enumerate(lines):
-            match_list = re.finditer("^Rule\s+(?P<dstzone>[-A-Za-z]+)\s+[0-9]+\s+max\s+-\s+"
-                                     "(?P<month>[A-Za-z]+)\s+(?P<wday_stuff>[>=A-Za-z0-9]+)\s+"
-                                     "(?P<time>[:0-9]+)(?P<timemode>[swugz]*)\s+[:0-9su]+\s+"
-                                     "(?P<DS>[DS-])", line)
+            match_list = re.finditer(r"^Rule\s+(?P<dstzone>[-A-Za-z]+)\s+[0-9]+\s+max\s+-\s+"
+                                     r"(?P<month>[A-Za-z]+)\s+(?P<wday_stuff>[>=A-Za-z0-9]+)\s+"
+                                     r"(?P<time>[:0-9]+)(?P<timemode>[swugz]*)\s+[:0-9su]+\s+"
+                                     r"(?P<DS>[DS-])", line)
             if match_list:
                 for match in match_list:
                     try:
@@ -195,7 +196,7 @@ def dstrules_parse(tzfile):
                                        int(mday), int(hour), int(minute))
                     dstrule_list.append(new_rule)
 
-    dstrule_list.sort(cmp=dstrule_cmp)
+    dstrule_list.sort(key=cmp_to_key(dstrule_cmp))
 
     return dstrule_list
 
@@ -210,7 +211,7 @@ def build_zoneinfo_list(tzfile):
 
     zoneinfo_list = []
 
-    with open(tzfile, 'rb') as infile:
+    with open(tzfile, 'r') as infile:
         lines = infile.readlines()
         region = ""
         continent = ""
@@ -229,8 +230,8 @@ def build_zoneinfo_list(tzfile):
             # information.
 
             if line.startswith("Zone"):
-                match = re.match("^Zone\s+"
-                                 "(?P<continent>[A-Za-z]+)\/(?P<region>[-_\/A-Za-z]+)", line)
+                match = re.match(r"^Zone\s+"
+                                 r"(?P<continent>[A-Za-z]+)\/(?P<region>[-_\/A-Za-z]+)", line)
                 if match:
                     continent = match.group("continent")
                     region = match.group("region")
@@ -257,16 +258,16 @@ def build_zoneinfo_list(tzfile):
                         region = ""
 
             # Now look to see if we've found the final line of the block
-            match = re.match("^(Zone\s+[-_\/A-Za-z]+\s+|\s+)"  # Leading spaces or zone name
+            match = re.match(r"^(Zone\s+[-_\/A-Za-z]+\s+|\s+)"  # Leading spaces or zone name
                              # The gmt offset (like 4:00 or -3:30)
-                             "(?P<offset>[-0-9:]+)\s+"
+                             r"(?P<offset>[-0-9:]+)\s+"
                              # The name of the dstrule, such as US, or - if no DST
-                             "(?P<dst_name>[-A-Za-z]+)\s+"
+                             r"(?P<dst_name>[-A-Za-z]+)\s+"
                              # The short name of the timezone, like E%sT (EST or EDT) or VET
                              # Or a GMT offset like +06
-                             "(?P<tz_abbr>([A-Z%s\/]+)|\+\d+)"
+                             r"(?P<tz_abbr>([A-Z%s\/]+)|\+\d+)"
                              # Trailing spaces and comments, no year or dates allowed
-                             "(\s+\#.*)?$",
+                             r"(\s+\#.*)?$",
                              line, re.VERBOSE)
 
             if match and region:
@@ -293,7 +294,7 @@ def zonelink_parse(tzfile):
 
     zonelink_list = []
 
-    with open(tzfile, 'rb') as infile:
+    with open(tzfile, 'r') as infile:
         lines = infile.readlines()
         for line in lines:
             # Parse blocks that look like this
@@ -301,8 +302,8 @@ def zonelink_parse(tzfile):
 
             # It's a link!
             if line.startswith("Link"):
-                match = re.match("^Link\s+(?P<target>[-_\/A-Za-z]+)\s+"
-                                 "(?P<linkname>[-_\/A-Za-z]+)\s*", line)
+                match = re.match(r"^Link\s+(?P<target>[-_\/A-Za-z]+)\s+"
+                                 r"(?P<linkname>[-_\/A-Za-z]+)\s*", line)
                 if match:
                     target = match.group("target")
                     linkname = match.group("linkname")
@@ -346,7 +347,7 @@ def zoneinfo_to_bin(zoneinfo_list, dstrule_list, zonelink_list, output_bin):
         region_id_list.append(continent+"/"+region)
 
         # fixup and output the timezone region name
-        output_bin.write(region.ljust(15, '\0'))  # 15-character region zero padded
+        output_bin.write(region.ljust(15, '\0').encode("utf8"))  # 15-character region zero padded
 
         # fixup the gmt offset to be integer minutes
         if ':' in gmt_offset_minutes:
@@ -365,7 +366,7 @@ def zoneinfo_to_bin(zoneinfo_list, dstrule_list, zonelink_list, output_bin):
         # fix timezone abbreviations that no longer have a DST mode
         if dst_zone not in dstzone_dict:
-        tz_abbr.replace('*', 'S')  # remove
+        tz_abbr = tz_abbr.replace('*', 'S')  # str.replace returns a new string
-        output_bin.write(tz_abbr.ljust(5, '\0'))  # 5-character region zero padded
+        output_bin.write(tz_abbr.ljust(5, '\0').encode("utf8"))  # 5-character region zero padded
 
         # dst table entry, 0 for NONE (ie. dash '-')
         if dst_zone in dstzone_dict:
@@ -389,7 +390,7 @@ def zoneinfo_to_bin(zoneinfo_list, dstrule_list, zonelink_list, output_bin):
             output_bin.write(bytearray(16))
         else:
             for dstrule in dstrules:
-                output_bin.write(struct.pack('c', dstrule.ds))
+                output_bin.write(struct.pack('c', dstrule.ds.encode("utf8")))
                 output_bin.write(struct.pack('B', dstrule.wday))
                 output_bin.write(struct.pack('B', dstrule.flag))
                 output_bin.write(struct.pack('B', dstrule.month))
@@ -407,7 +408,7 @@ def zoneinfo_to_bin(zoneinfo_list, dstrule_list, zonelink_list, output_bin):
             print("Couldn't find region, skipping:", e)
             continue
         output_bin.write(struct.pack('H', region_id))
-        output_bin.write(linkname.ljust(TIMEZONE_LINK_NAME_LENGTH, '\0'))
+        output_bin.write(linkname.ljust(TIMEZONE_LINK_NAME_LENGTH, '\0').encode("utf8"))
 
 
 def build_zoneinfo_dict(olson_database):

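Porting note: list.sort() lost its cmp= argument in Python 3; functools.cmp_to_key adapts the existing comparator without rewriting it. A minimal sketch with a stand-in for dstrule_cmp:

    from functools import cmp_to_key

    def rule_cmp(a, b):                    # stand-in for the real dstrule_cmp
        return (a > b) - (a < b)           # old-style cmp() result: -1, 0 or 1

    rules = [3, 1, 2]
    rules.sort(key=cmp_to_key(rule_cmp))
    assert rules == [1, 2, 3]
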
+ 8 - 8
tools/tool_check.py

@@ -16,7 +16,7 @@ import re
 import sh
 import subprocess
 import sys
-from pkg_resources import parse_version
+from packaging import version
 
 from waflib import Logs
 
@@ -50,7 +50,7 @@ def tool_check():
         brew_req_list = text_to_req_list(brew_req_text)
 
     brew_installed_text = subprocess.check_output(['brew', 'list'])
-    brew_installed_dict = installed_list_to_dict(text_to_req_list(brew_installed_text))
+    brew_installed_dict = installed_list_to_dict(text_to_req_list(brew_installed_text.decode("utf8")))
 
     for req in brew_req_list:
         check_requirement(req, brew_installed_dict)
@@ -111,19 +111,19 @@ def check_requirement(req, installed):
         # No version/comparison
         return
 
-    version = parse_version(installed[req[0]])
+    ver = version.parse(installed[req[0]])
     success = True
 
     if req[1] == '==':
-        success = version == parse_version(req[2])
+        success = ver == version.parse(req[2])
     elif req[1] == '<=':
-        success = version <= parse_version(req[2])
+        success = ver <= version.parse(req[2])
     elif req[1] == '>=':
-        success = version >= parse_version(req[2])
+        success = ver >= version.parse(req[2])
     elif req[1] == '<':
-        success = version < parse_version(req[2])
+        success = ver < version.parse(req[2])
     elif req[1] == '>':
-        success = version > parse_version(req[2])
+        success = ver > version.parse(req[2])
     else:
         Logs.pprint('RED', 'Don\'t understand comparison \'%s\'' % req[1])
 

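Porting note: pkg_resources.parse_version is part of setuptools' deprecated API; packaging.version is the maintained equivalent, and renaming the local to ver keeps it from shadowing the imported module. Roughly:

    from packaging import version

    ver = version.parse("4.2.1")
    assert ver >= version.parse("4.2")
    assert ver < version.parse("5")
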
+ 4 - 4
waftools/generate_timezone_data.py

@@ -19,7 +19,7 @@ import tools.timezones
 from resources.types.resource_definition import ResourceDefinition
 from resources.types.resource_object import ResourceObject
 
-import StringIO
+from io import BytesIO
 
 
 def wafrule(task):
@@ -34,11 +34,11 @@ def generate_resource_object(olson_database):
     dstrule_list = tools.timezones.dstrules_parse(olson_database)
     zonelink_list = tools.timezones.zonelink_parse(olson_database)
 
-    print "{} {} {}".format(len(zoneinfo_list),
+    print("{} {} {}".format(len(zoneinfo_list),
                             len(dstrule_list),
-                            len(zonelink_list))
+                            len(zonelink_list)))
 
-    data_file = StringIO.StringIO()
+    data_file = BytesIO()
     tools.timezones.zoneinfo_to_bin(zoneinfo_list, dstrule_list, zonelink_list, data_file)
 
     reso = ResourceObject(

+ 1 - 1
waftools/gitinfo.py

@@ -28,7 +28,7 @@ def get_git_revision(ctx):
     # Validate that git tag follows the required form:
     # See https://github.com/pebble/tintin/wiki/Firmware,-PRF-&-Bootloader-Versions
     # Note: version_regex.groups() returns sequence ('0', '0', '0', 'suffix'):
-    version_regex = re.search("^v(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:(?:-)(.+))?$", tag)
+    version_regex = re.search(r"^v(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:(?:-)(.+))?$", tag)
     if version_regex:
         # Get version numbers from version_regex.groups() sequence and replace None values with 0
         # e.g. v2-beta11 => ('2', None, None, 'beta11') => ('2', '0', '0')

+ 2 - 2
waftools/ldscript.py

@@ -23,12 +23,12 @@ def process_ldscript(self):
         return
 
     def convert_to_node(node_or_path_str):
-        if isinstance(node_or_path_str, basestring):
+        if isinstance(node_or_path_str, str):
             return self.path.make_node(node_or_path_str)
         else:
             return node_or_path_str
 
-    if isinstance(self.ldscript, basestring) or \
+    if isinstance(self.ldscript, str) or \
        isinstance(self.ldscript, list):
         ldscripts = Utils.to_list(self.ldscript)
     else:  # Assume Node

+ 6 - 6
waftools/pebble_arm_gcc.py

@@ -163,7 +163,7 @@ def configure(conf):
       c_warnings.append('-Wno-expansion-to-defined')
       c_warnings.append('-Wno-enum-conversion')
 
-      if not ('13', '0') <= conf.env.CC_VERSION <= ('14', '0', '0'):
+      if not ('13', '0') <= conf.env.CC_VERSION <= ('14', '2', '0'):
         # Verify the toolchain we're using is allowed. This is to prevent us from accidentally
         # building and releasing firmwares that are built in ways we haven't tested.
 
@@ -267,20 +267,20 @@ Or re-configure with the --relax_toolchain_restrictions option. """
     # Set optimization level
     if conf.options.beta:
         optimize_flags = '-Os'
-        print "Beta mode"
+        print("Beta mode")
     elif conf.options.release:
         optimize_flags = '-Os'
-        print "Release mode"
+        print("Release mode")
     elif conf.options.fat_firmware:
         optimize_flags = '-O0'
         conf.env.IS_FAT_FIRMWARE = True
-        print 'Building Fat Firmware (no optimizations, logging enabled)'
+        print('Building Fat Firmware (no optimizations, logging enabled)')
     elif conf.options.gdb:
         optimize_flags = '-Og'
-        print "GDB mode"
+        print("GDB mode")
     else:
         optimize_flags = '-Os'
-        print 'Debug Mode'
+        print('Debug Mode')
 
     conf.env.append_value('CFLAGS', optimize_flags)
     conf.env.append_value('LINKFLAGS', optimize_flags)

+ 2 - 2
waftools/pebble_test.py

@@ -77,8 +77,8 @@ class run_test(Task.Task):
             return
 
         if self.generator.bld.options.show_output:
-            print stdout
-            print stderr
+            print(stdout)
+            print(stderr)
         tup = (test_runme_node, proc.returncode, stdout, stderr, str(timer))
         self.generator.utest_result = tup
 

+ 3 - 3
waftools/show_configure.py

@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import os
-import pipes
+from shlex import quote
 
 from waflib import ConfigSet, Options
 from waflib.Build import BuildContext
@@ -62,12 +62,12 @@ class show_configure(BuildContext):
         # Configure time environment vars
         for var in ['CFLAGS']:
             if var in env.environ:
-                argv = ['{}={}'.format(var, pipes.quote(env.environ[var]))] + argv
+                argv = ['{}={}'.format(var, quote(env.environ[var]))] + argv
 
         # Persistent environment vars
         for var in ['WAFLOCK']:
             if var in env.environ:
-                argv = ['export {}={};'.format(var, pipes.quote(env.environ[var]))] + argv
+                argv = ['export {}={};'.format(var, quote(env.environ[var]))] + argv
 
         # Print and force waf to complete without further output
         print(' '.join(argv))

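Porting note: the pipes module was deprecated in Python 3.11 and removed in 3.13; shlex.quote is the drop-in replacement for pipes.quote. For example:

    from shlex import quote

    cflags = '-Os -DFOO="a b"'
    print('CFLAGS={}'.format(quote(cflags)))   # CFLAGS='-Os -DFOO="a b"'
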
+ 1 - 1
waftools/xcode_pebble.py

@@ -408,6 +408,6 @@ else\n\
     pypy waf $ACTION\n\
 fi\n\
 ")
-        os.chmod(xcscript_path, 0755)
+        os.chmod(xcscript_path, 0o0755)
         f.close()
         

+ 31 - 46
wscript

@@ -40,21 +40,6 @@ def truncate(msg):
     return msg
 
 
-def get_comma_separated_args(option, opt, value, parser):
-    setattr(parser.values, option.dest, value.split(','))
-
-
-old_display = waflib.Task.TaskBase.display
-def wrap_display(self):
-    return truncate(old_display(self))
-waflib.Task.TaskBase.display = wrap_display
-
-old_format_error = waflib.Task.TaskBase.format_error
-def wrap_format_error(self):
-    return truncate(old_format_error(self))
-waflib.Task.TaskBase.format_error = wrap_format_error
-
-
 def run_arm_gdb(ctx, elf_node, cmd_str="", target_server_port=3333):
     from tools.gdb_driver import find_gdb_path
     arm_none_eabi_path = find_gdb_path()
@@ -119,7 +104,7 @@ def options(opt):
                    help='Disable automatic reboots when watchdog fires')
     opt.add_option('--test_apps', action='store_true',
                    help='Enables test apps (off by default)')
-    opt.add_option('--test_apps_list', action='callback', type='string', callback=get_comma_separated_args,
+    opt.add_option('--test_apps_list', type=str,
                    help='Specify AppInstallId\'s of the test apps to be compiled with the firmware')
     opt.add_option('--performance_tests', action='store_true',
                    help='Enables instrumentation + apps for performance testing (off by default)')
@@ -197,15 +182,15 @@ def handle_configure_options(conf):
 
     if conf.options.malloc_instrumentation:
         conf.env.append_value('DEFINES', 'MALLOC_INSTRUMENTATION')
-        print "Enabling malloc instrumentation"
+        print("Enabling malloc instrumentation")
 
     if conf.options.qemu:
         conf.env.append_value('DEFINES', 'TARGET_QEMU')
 
     if conf.options.test_apps_list:
         conf.options.test_apps = True
-        conf.env.test_apps_list = conf.options.test_apps_list
-        print "Enabling test apps: " + str(conf.options.test_apps_list)
+        conf.env.test_apps_list = conf.options.test_apps_list.split(",")
+        print("Enabling test apps: " + str(conf.options.test_apps_list))
 
     if conf.options.build_test_apps or conf.options.test_apps:
         conf.env.BUILD_TEST_APPS = True
@@ -232,53 +217,53 @@ def handle_configure_options(conf):
 
     if conf.options.nosleep:
         conf.env.append_value('DEFINES', 'PBL_NOSLEEP')
-        print "Sleep/stop mode disabled"
+        print("Sleep/stop mode disabled")
 
     if conf.options.nostop:
         conf.env.append_value('DEFINES', 'PBL_NOSTOP')
-        print "Stop mode disabled"
+        print("Stop mode disabled")
 
     if conf.options.lowpowerdebug:
         conf.env.append_value('DEFINES', 'LOW_POWER_DEBUG')
-        print "Sleep and Stop mode debugging enabled"
+        print("Sleep and Stop mode debugging enabled")
 
     if conf.options.nowatch:
         conf.env.append_value('DEFINES', 'NO_WATCH_TIMEOUT')
-        print "Watch watchdog disabled"
+        print("Watch watchdog disabled")
 
     if conf.options.nowatchdog:
         conf.env.append_value('DEFINES', 'NO_WATCHDOG')
         conf.env.NO_WATCHDOG = True
-        print "Watchdog reboot disabled"
+        print("Watchdog reboot disabled")
 
     if conf.options.reboot_on_bt_crash:
         conf.env.append_value('DEFINES', 'REBOOT_ON_BT_CRASH=1')
-        print "BT now crash will trigger an MCU reboot"
+        print("BT now crash will trigger an MCU reboot")
 
     if conf.options.test_apps:
         conf.env.append_value('DEFINES', 'ENABLE_TEST_APPS')
-        print "Im in ur firmware, bloatin ur binz! (Test apps enabled)"
+        print("Im in ur firmware, bloatin ur binz! (Test apps enabled)")
 
     if conf.options.performance_tests:
         conf.env.append_value('DEFINES', 'PERFORMANCE_TESTS')
         conf.options.profiler = True
-        print "Instrumentation and apps for performance measurement enabled (enables profiler)"
+        print("Instrumentation and apps for performance measurement enabled (enables profiler)")
 
     if conf.options.verbose_logs:
         conf.env.append_value('DEFINES', 'VERBOSE_LOGGING')
-        print "Verbose logging enabled"
+        print("Verbose logging enabled")
 
     if conf.options.ui_debug:
         conf.env.append_value('DEFINES', 'UI_DEBUG')
 
     if conf.options.no_sandbox or conf.options.qemu:
-        print "Sandbox disabled"
+        print("Sandbox disabled")
     else:
         conf.env.append_value('DEFINES', 'APP_SANDBOX')
 
     if conf.options.bb_large_spi:
         conf.env.append_value('DEFINES', 'LARGE_SPI_FLASH')
-        print "Enabling 8MB BigBoard flash"
+        print("Enabling 8MB BigBoard flash")
 
     if not conf.options.nolog:
         conf.env.append_value('DEFINES', 'PBL_LOG_ENABLED')
@@ -289,13 +274,13 @@ def handle_configure_options(conf):
         conf.env.append_value('DEFINES', 'PROFILE_INTERRUPTS')
         if not conf.options.profiler:
             # Can't profile interrupts without the profiler enabled
-            print "Enabling profiler"
+            print("Enabling profiler")
             conf.options.profiler = True
 
     if conf.options.profiler:
         conf.env.append_value('DEFINES', 'PROFILER')
         if not conf.options.nostop:
-            print "Enable --nostop for accurate profiling."
+            print("Enable --nostop for accurate profiling.")
             conf.env.append_value('DEFINES', 'PBL_NOSTOP')
 
     if conf.options.voice_debug:
@@ -303,34 +288,34 @@ def handle_configure_options(conf):
 
     if conf.options.battery_debug:
         conf.env.append_value('DEFINES', 'BATTERY_DEBUG')
-        print "Enabling higher battery charge voltage."
+        print("Enabling higher battery charge voltage.")
 
     if conf.options.future_ux and not conf.is_tintin():
-        print "Future UX features enabled."
+        print("Future UX features enabled.")
         conf.env.FUTURE_UX = True
 
     conf.env.INTERNAL_SDK_BUILD = bool(conf.options.internal_sdk_build)
     if conf.env.INTERNAL_SDK_BUILD:
-        print "Internal SDK enabled"
+        print("Internal SDK enabled")
 
     if conf.options.force_fit_tintin:
         conf.env.append_value('DEFINES', 'TINTIN_FORCE_FIT')
-        print "Functionality is secondary to usability"
+        print("Functionality is secondary to usability")
 
     if (conf.is_snowy_compatible() and not conf.options.no_lto) or conf.options.lto:
         conf.options.lto = True
-        print "Turning on LTO."
+        print("Turning on LTO.")
 
     if conf.options.no_link:
         conf.env.NO_LINK = True
-        print "Not linking firmware"
+        print("Not linking firmware")
 
     if conf.options.infinite_backlight and 'bb' in conf.options.board:
         conf.env.append_value('DEFINES', 'INFINITE_BACKLIGHT')
-        print "Enabling infinite backlight."
+        print("Enabling infinite backlight.")
 
     if conf.options.bootloader_test in ['stage1', 'stage2']:
-        print "Forcing MFG on for bootloader test build."
+        print("Forcing MFG on for bootloader test build.")
         conf.options.mfg = True
 
     if conf.options.bootloader_test == 'stage1':
@@ -544,9 +529,9 @@ def configure(conf):
     Logs.pprint('CYAN', 'Configuring unit test environment')
     conf.setenv('local', unit_test_env)
 
-    if sys.platform.startswith('linux'):
-        libclang_path = subprocess.check_output(['llvm-config', '--libdir']).strip()
-        conf.env.append_value('INCLUDES', [os.path.join(libclang_path, 'clang/3.2/include/'),])
+    # if sys.platform.startswith('linux'):
+    #     libclang_path = subprocess.check_output(['llvm-config', '--libdir']).strip()
+    #     conf.env.append_value('INCLUDES', [os.path.join(libclang_path, 'clang/3.2/include/'),])
 
     # The waf clang tool likes to use llvm-ar as it's ar tool, but that doesn't work on our build
     # servers. Fall back to boring old ar. This will populate the 'AR' env variable so future
@@ -1479,10 +1464,10 @@ def ble_send_hci(ctx):
     from waflib import Options
 
     def _dump_hex_array(prefix, hex_array):
-        print prefix + " [",
+        print(prefix + " [", end="")
         for i in range(0, len(hex_array)):
-            print "0x%02x " % hex_array[i],
-        print "]"
+            print("0x%02x " % hex_array[i], end="")
+        print("]")
 
     hci_bytes = [int(i, 16) for i in Options.commands]
     _dump_hex_array("Sent HCI CMD:", hci_bytes)
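
Porting note: a trailing comma on a Python 2 print statement suppressed the newline, so a mechanical print(...) rewrite would have broken this hex dump across lines; end="" (used in the hunk above) preserves the single-line output:

    hex_array = [0x01, 0x03, 0x0c, 0x00]
    print("Sent HCI CMD:" + " [", end="")
    for b in hex_array:
        print("0x%02x " % b, end="")
    print("]")                             # Sent HCI CMD: [0x01 0x03 0x0c 0x00 ]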