all main wml tools: use filter(), map() and zip() from Python 3

The main difference is that these functions now return lazy iterators instead of lists. This currently breaks wmlscope's collision detection, which will be fixed in my next commit.
Elvish_Hunter 2015-08-05 17:12:38 +02:00
parent 6d67feff9f
commit c1a783b038
5 changed files with 6 additions and 1 deletion
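
For context, here is a minimal sketch (not part of this commit) of the behaviour change described above, assuming a Python 2 interpreter where future_builtins is available: the imported filter(), map() and zip() return one-shot lazy iterators, matching Python 3, instead of lists.

from __future__ import print_function
# Python 2 only: future_builtins supplies the Python 3 semantics of these names.
from future_builtins import filter, map, zip

squares = map(lambda n: n * n, [1, 2, 3])
print(list(squares))    # [1, 4, 9] -- the iterator has to be consumed explicitly
print(list(squares))    # []        -- and it is exhausted after one pass

# The same applies to zip(): a second traversal of the same object yields nothing,
# which is the kind of reuse that trips up wmlscope's collision detection below.
pairs = zip(["a", "b"], [1, 2])
for item in pairs:
    pass
print(list(pairs))      # []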

@@ -21,6 +21,7 @@ Limitations:
 """
 from __future__ import print_function, unicode_literals
+from future_builtins import filter, map, zip
 from functools import total_ordering
 import sys, re, copy, codecs

@@ -4,6 +4,7 @@ wmltools.py -- Python routines for working with a Battle For Wesnoth WML tree
 """
 from __future__ import print_function, unicode_literals
+from future_builtins import filter, map, zip
 from functools import total_ordering
 import collections, codecs

@@ -62,6 +62,7 @@ indent already zero; these two conditions strongly suggest unbalanced WML.
 """
 from __future__ import print_function, unicode_literals
+from future_builtins import filter, map, zip
 import sys, os, getopt, filecmp, re, codecs
 from wesnoth import wmltools

@@ -182,6 +182,7 @@
 #
 from __future__ import print_function, unicode_literals
+from future_builtins import filter, map, zip
 import sys, os, re, getopt, string, copy, difflib, time, gzip, codecs
 from wesnoth.wmltools import *

@@ -94,6 +94,7 @@
 # sets the warning level.
 from __future__ import print_function, unicode_literals
+from future_builtins import filter, map, zip
 import sys, os, time, re, getopt, hashlib, glob, codecs
 from wesnoth.wmltools import *
@@ -448,7 +449,7 @@ Usage: wmlscope [options] dirpath
 collisions = []
 for (namespace, filename) in xref.filelist.generator():
     with open(filename, "rb") as ifp: # this one may be an image or a sound, so don't assume UTF8 encoding
-        collisions.append(hashlib.md5(ifp.read()).digest())
+        collisions.append(hashlib.md5(ifp.read()).hexdigest()) # hexdigest can be easily printed, unlike digest
 collisions = zip(xref.filelist.flatten(), collisions)
 hashcounts = {}
 for (n, h) in collisions:
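
A minimal sketch (plain hashlib usage, not taken from the Wesnoth sources) of the digest() versus hexdigest() difference that the new inline comment refers to:

import hashlib

data = b"possibly binary file contents \x00\xff"
print(hashlib.md5(data).digest())     # raw 16-byte digest, usually full of unprintable bytes
print(hashlib.md5(data).hexdigest())  # 32-character hex string, safe to print in a report

Note also that under the new semantics the zip() near the end of this hunk produces a one-shot iterator rather than a list, which is most likely the collision-detection breakage mentioned in the description above and deferred to the next commit.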