Ahoj,
ten sqlite neni zadnej standard, to jsem si udelal vlastni format, aby
se s tim dalo lepe pracovat... To co je v priloze jsem jeste nekde na
disku nasel. Nemam se toho ujmout? Nerad bych, aby to dopadlo spatne.
Ted se mi to nechce cist jestli uz to delas, ale jestli budes chtit muzu
se na to podivat.
Jinak ano byl to dibavod.
Take jsem premyslel o tom oznaceni vod, ktere jsou v pruniku. Asi by to
nemusel byt problem. Slo by to udelat tak, ze nactu vsechny vody v
cechach a pres nejaky ctvereckovy spatialindex se udela prunik. Pokud
bude prunik s dibavod, muze se entita oznacit nejakym atributem
(obsolete=true), ktery si pak kazdy muze vyfiltrovat a pripadne smazat
nebo naopak atribut odstranit. To bych take asi mohl udelat.
Tomas
Pavel Machek napsal(a):
Ahoj!
Kdysi davno jsi psal:
- Importovany linie jednotlivych toku 5.212.525 bodu 250.612 linii
- Importovany nadrze 1.486.406 bodu 72.026 ploch 822 multipolygonu z
toho 1.522 der (ostrovu)
- Importovany nazvy vcetne identifikatoru pro pripadny merge v
- budoucnu
- Sjednoceny sdilene nody (stejna lokace), ktere neni mozne
simplifikovat 187.921 bodu
- Export relaci pro polygony. Smer polygonu urcuje outer/inner.
- Simplifikace dle zadane hodnoty
Subject byl "import dat povodi Labe" -- jsou to ta sama data jako
dibavod (http://www.dibavod.cz/index.php?id=27) nebo ne?
Python skripty jsem rozchodil, ale ony na vstupu potrebuji .sqlite, a
ja to neumim ze shapefilu do .sqlite dostat :-(. Podle nejakyho navodu
jsem zkusil:
spatialite> .loadshp A04zvm_Melioracni_kanaly A04 UTF-8 2065 the_geom
load shapefile error: cannot open shapefile 'A04zvm_Melioracni_kanaly'
cause: 'A04zvm_Melioracni_kanaly.dbf' contains unsupported
data types
spatialite>
Jeste me napadlo oficialne kontaktovat urad s dotazem o tom zakonu.
Oni musi oficialne odpovedet a pak bysme 100% vedeli, jak to tedy je.
Mohl by se toho nekdo ujmout?
Zda se ze tohle zvladnul hanoj :).
Pavel
import struct, datetime, itertools, string
def dbfreader(f):
    """Return an iterator over records in an Xbase DBF file.

    The first row yielded contains the field names.
    The second row contains field specs: (type, size, decimal places).
    Subsequent rows contain the data records; records marked as deleted
    are skipped.

    f -- file object opened for binary reads.
    """
    # See DBF format spec at:
    # http://www.pgts.com.au/download/public/xbase.htm#DBF_STRUCT
    # 32-byte file header: record count, header length, record length.
    numrec, lenheader, lenrecord = struct.unpack('<xxxxLHH20x', f.read(32))
    fields = []
    headerPassed = 0
    # One 32-byte field descriptor per column, up to the end of the header.
    while headerPassed + 32 < lenheader:
        name, typ, size, deci = struct.unpack('<11sc4xBB14x', f.read(32))
        name = string.strip(name.replace('\0', ''))  # drop NUL padding from the name
        if ord(typ) == 0:
            # 0x00 also terminates the descriptor array in some writers.
            break
        if typ not in ["N", "C", "M", "D", "L", "F"]:
            # Unsupported field type: stop reading descriptors here.
            break
        fields.append((name, typ, size, deci))
        headerPassed += 32
    yield [field[0] for field in fields]
    yield [tuple(field[1:]) for field in fields]
    # Position at the first data record (skips the 0x0D terminator too).
    f.seek(lenheader)
    #terminator = f.read(1)
    #assert terminator == '\r'
    # Prepend the 1-byte deletion flag so the struct format covers a full record.
    fields.insert(0, ('DeletionFlag', 'C', 1, 0))
    fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in fields])
    fmtsiz = struct.calcsize(fmt)
    for i in xrange(numrec):
        record = struct.unpack(fmt, f.read(fmtsiz))
        if record[0] != ' ':
            continue  # deleted record ('*' flag)
        result = []
        for (name, typ, size, deci), value in itertools.izip(fields, record):
            if name == 'DeletionFlag':
                continue
            if typ == "N":
                # Numeric: right-justified ASCII, possibly NUL-padded.
                value = value.replace('\0', '').lstrip()
                if value == '':
                    value = 0
                elif deci:
                    value = float(value)
                else:
                    # if int(value) != long(value):
                    # value = long(value)
                    # else:
                    value = int(value)
            elif typ == 'F':
                value = float(value)
            elif typ == 'D':
                # Date stored as 'YYYYMMDD'.
                y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
                value = datetime.date(y, m, d)
            elif typ == 'L':
                # Logical: normalize to 'T', 'F' or '?' (unknown).
                value = (value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?'
            # Types 'C' and 'M' fall through unchanged (raw bytes).
            result.append(value)
        yield result
def dbfwriter(f, fieldnames, fieldspecs, records):
    """Write an Xbase DBF file to f (note: writes, it does not return a string).

    f          -- file object open for writing in a binary mode.
    fieldnames -- names no longer than ten characters, without NUL bytes.
    fieldspecs -- (type, size, deci) tuples, where type is one of:
                    C  ascii character data
                    M  ascii character memo data (real memo fields not supported)
                    D  datetime objects
                    N  ints or decimal objects
                    L  logical values 'T', 'F', or '?'
                  size is the field width and deci the number of decimal
                  places for N fields.
    records    -- sequence of records (sequences of field values); must
                  support len(), so a list rather than a generator.
    """
    # --- header info ---
    ver = 3  # dBASE III w/o memo
    now = datetime.datetime.now()
    yr, mon, day = now.year-1900, now.month, now.day  # year stored as offset from 1900
    numrec = len(records)
    numfields = len(fieldspecs)
    lenheader = numfields * 32 + 33  # 32-byte head + descriptors + 1-byte terminator
    lenrecord = 1  # +1 for the deletion flag
    for field in fieldspecs:
        lenrecord += field[1]
    hdr = struct.pack('<BBBBLHH20x', ver, yr, mon, day, numrec, lenheader, lenrecord)
    f.write(hdr)
    # --- field descriptors ---
    for name, (typ, size, deci) in itertools.izip(fieldnames, fieldspecs):
        name = (name + '\x00' * 11)[:11]  # NUL-pad the name to exactly 11 bytes
        fld = struct.pack('<11sc4xBB14x', name, typ, size, deci)
        f.write(fld)
    # --- header terminator ---
    f.write('\r')
    # --- records ---
    for record in records:
        f.write(' ')  # deletion flag: ' ' = live record
        for (typ, size, deci), value in itertools.izip(fieldspecs, record):
            if typ == "N":
                value = str(value).rjust(size)
            elif typ == 'D':
                value = value.strftime('%Y%m%d')
            elif typ == 'L':
                value = str(value)[0].upper()
            else:
                value = str(value)[:size].ljust(size)
            # Fields must fill their declared width exactly.
            assert len(value) == size
            f.write(value)
    # --- end-of-file marker ---
    f.write('\x1A')
# -------------------------------------------------------
# Example calls: round-trip a DBF through read / modify / write / CSV dump.
if __name__ == '__main__':
    import sys, csv
    from cStringIO import StringIO
    from operator import itemgetter
    # Read a database (default nosic.dbf, or the path given on the command line).
    filename = "nosic.dbf"
    if len(sys.argv) == 2:
        filename = sys.argv[1]
    f = open(filename, 'rb')
    db = list(dbfreader(f))
    f.close()
    for record in db:
        print record
    # First two rows are metadata (names, specs); the rest are data records.
    fieldnames, fieldspecs, records = db[0], db[1], db[2:]
    # Alter the database: drop one record, sort by the fifth column.
    del records[4]
    records.sort(key=itemgetter(4))
    # Remove the first field from the schema and from every record.
    del fieldnames[0]
    del fieldspecs[0]
    records = [rec[1:] for rec in records]
    # Create a new DBF in memory.
    f = StringIO()
    dbfwriter(f, fieldnames, fieldspecs, records)
    # Read the data back from the new DBF to verify the round trip.
    print '-' * 20
    f.seek(0)
    for line in dbfreader(f):
        print line
    f.close()
    # Convert to CSV.
    print '.' * 20
    f = StringIO()
    csv.writer(f).writerow(fieldnames)
    csv.writer(f).writerows(records)
    print f.getvalue()
    f.close()
import struct, dbf, cPickle, time
import sqlite3, os.path, math
# Shape-type codes from the ESRI shapefile specification.
NULL_SHAPE = 0
POINT_SHAPE = 1
POLYLINE_SHAPE = 3
POLYGON_SHAPE = 5
def pnInPoly(pts, pt):
    """Ray-casting (crossing number) point-in-polygon test.

    pts -- sequence of (x, y) vertices; the polygon is closed implicitly
           (last vertex connects back to the first).
    pt  -- (x, y) point to test.

    Returns True when pt lies strictly inside the polygon.  Points exactly
    on an edge may report either side (standard crossing-number behaviour).
    An empty or degenerate pts sequence returns False.
    """
    inside = False
    j = len(pts) - 1  # previous vertex index, wraps around
    for i in range(len(pts)):  # range, not xrange: identical here, Python 2/3 portable
        yi, yj = pts[i][1], pts[j][1]
        # Does edge (j, i) straddle the horizontal ray through pt?
        if (yi <= pt[1] < yj) or (yj <= pt[1] < yi):
            # x coordinate where the edge crosses the ray's height.
            xcross = float(pts[j][0] - pts[i][0]) * (pt[1] - pts[i][1]) / (yj - yi) + pts[i][0]
            if pt[0] < xcross:
                inside = not inside
        j = i
    return inside
def reader(filename, records = -1):
    """Iterate over the records of an ESRI shapefile (.shp).

    filename -- path to the .shp file.
    records  -- maximum number of records to yield; -1 (default) = all.

    Yields (record_number, shape_type, geometry) where geometry is:
      None for null shapes,
      an (x, y) tuple for point shapes,
      a list of parts (each a list of (x, y) tuples) for polylines/polygons.
    Raises Exception on any other shape type.
    """
    f = open(filename, 'rb')
    f.seek(100)  # skip the 100-byte shapefile main header
    while 1:
        try:
            # Record header: big-endian record number and content length.
            (number, length) = struct.unpack('>ii', f.read(8))
        except:  # NOTE(review): bare except treats *any* error as EOF, not just short reads
            print "end of file..."
            break
        record = f.read(length * 2)  # length is counted in 16-bit words
        # Shape type is a little-endian int32; ord() of the first byte is
        # sufficient because all defined type codes are < 256.
        if ord(record[0]) == NULL_SHAPE:
            # Null shape
            assert (len(record) == 4)
            yield (number, 0, None)
        elif ord(record[0]) == POINT_SHAPE:
            # Point shape
            assert (len(record) == 20)
            (typ, x, y) = struct.unpack('<idd', record)
            yield (number, typ, (x, y))
        elif ord(record[0]) in (POLYLINE_SHAPE, POLYGON_SHAPE):
            # Polygon or polyline shape: 44-byte header (type, bbox, counts)
            (typ, bbleft, bbtop, bbright, bbbottom, numParts, numPoints) = struct.unpack('<iddddii', record[:44])
            record = record[44:]
            # Start index of each part (ring or line segment) in the point array.
            parts = []
            for i in xrange(numParts):
                parts.append(struct.unpack('<i', record[i*4:(i+1)*4])[0])
            record = record[numParts * 4:]
            points = []
            for i in xrange(numPoints):
                points.append(struct.unpack('<dd', record[i * 16:(i + 1) * 16]))
            # Slice the flat point array into per-part point lists.
            polygon = []
            for i in xrange(len(parts)):
                if i + 1 >= len(parts):
                    stop = -1  # last part runs to the end of the point array
                else:
                    stop = parts[i + 1]
                current = parts[i]
                polygonpart = []
                while current != stop:
                    if current >= len(points):
                        break
                    polygonpart.append(points[current])
                    current += 1
                polygon.append(polygonpart)
            yield (number, typ, polygon)
        else:
            raise Exception('Unknown shape')
        records -= 1  # counts down from -1 forever when no limit was given
        if records == 0:
            break
    f.close()
def isIn(index, pt):
    """Look up which indexed polygon (if any) contains pt.

    index -- dict mapping 1e6-degree grid cells to lists of
             (identifier, parts) entries, where parts is a list of rings.
    pt    -- (x, y) point in the same units as the index keys.

    Returns the identifier of the first matching polygon, or None.
    """
    cell = (pt[0] / 1000000, pt[1] / 1000000)
    for entry in index.get(cell, []):
        for ring in entry[1]:
            if pnInPoly(ring, pt):
                return entry[0]
    return None
def jtsk2wgs84(X, Y):
    """Convert S-JTSK (Krovak) planar coordinates to WGS-84.

    X, Y -- planar coordinates in metres (positive, i.e. the negated
            Krovak southing/westing as used by the callers in this file).

    Returns (longitude, latitude) in degrees on the WGS-84 ellipsoid.
    """
    # Fixed assumed height above the ellipsoid [m] for the 3-D step below.
    H = 245
    # --- Geographic coordinates on the Bessel ellipsoid from planar X/Y ---
    a = 6377397.15508           # Bessel 1841 semi-major axis [m]
    e = 0.081696831215303       # Bessel first eccentricity
    n = 0.97992470462083        # Krovak projection constants
    konst_u_ro = 12310230.12797036
    sinUQ = 0.863499969506341
    cosUQ = 0.504348889819882
    sinVQ = 0.420215144586493
    cosVQ = 0.907424504992097
    alfa = 1.000597498371542
    k = 1.003419163966575
    ro = math.sqrt(X * X + Y * Y)
    epsilon = 2 * math.atan(Y / (ro + X))
    D = epsilon / n
    S = 2 * math.atan(math.exp(1 / n * math.log(konst_u_ro / ro))) - math.pi / 2
    sinS = math.sin(S)
    cosS = math.cos(S)
    sinU = sinUQ * sinS - cosUQ * cosS * math.cos(D)
    cosU = math.sqrt(1 - sinU * sinU)
    sinDV = math.sin(D) * cosS / cosU
    cosDV = math.sqrt(1 - sinDV * sinDV)
    sinV = sinVQ * cosDV - cosVQ * sinDV
    cosV = cosVQ * cosDV + sinVQ * sinDV
    Ljtsk = 2 * math.atan(sinV / (1 + cosV)) / alfa
    t = math.exp(2 / alfa * math.log((1 + sinU) / cosU / k))
    pom = (t - 1) / (t + 1)
    # Fixed-point iteration for sin(latitude) on the Bessel ellipsoid.
    while True:
        sinB = pom
        pom = t * math.exp(e * math.log((1 + e * sinB) / (1 - e * sinB)))
        pom = (pom - 1) / (pom + 1)
        if abs(pom - sinB) < 1e-15:
            break
    Bjtsk = math.atan(pom / math.sqrt(1 - pom * pom))
    # --- Geocentric Cartesian coordinates in S-JTSK ---
    a = 6377397.15508
    f_1 = 299.152812853         # Bessel inverse flattening
    e2 = 1 - (1 - 1 / f_1) * (1 - 1/f_1)
    ro = a / math.sqrt(1 - e2 * math.sin(Bjtsk) * math.sin(Bjtsk))
    x = (ro + H) * math.cos(Bjtsk) * math.cos(Ljtsk)
    y = (ro + H) * math.cos(Bjtsk) * math.sin(Ljtsk)
    z = ((1 - e2) * ro + H) * math.sin(Bjtsk)
    # --- 7-parameter Helmert transformation to WGS-84 Cartesian ---
    dx = 570.69                 # translations [m]
    dy = 85.69
    dz = 462.84
    wz = -5.2611 / 3600 * math.pi / 180   # rotations [rad]
    wy = -1.58676 / 3600 * math.pi / 180
    wx = -4.99821 / 3600 * math.pi / 180
    m = 3.543e-6                # scale correction
    xn = dx + (1 + m) * (x + wz * y - wy * z)
    yn = dy + (1 + m) * (-wz * x + y + wx * z)
    zn = dz + (1 + m) * (wy * x - wx * y + z)
    # --- Geodetic coordinates on the WGS-84 ellipsoid ---
    a = 6378137.0
    f_1 = 298.257223563
    a_b = f_1 / (f_1 - 1)
    p = math.sqrt(xn * xn + yn * yn)
    e2 = 1 - (1 - 1 / f_1) * (1 - 1/f_1)
    theta = math.atan(zn * a_b / p)
    st = math.sin(theta)
    ct = math.cos(theta)
    t = (zn + e2 * a_b * a * st * st * st) / (p - e2 * a * ct * ct * ct)
    B = math.atan(t)
    L = 2 * math.atan(yn / (p + xn))
    H = math.sqrt(1 + t * t) * (p - a / math.sqrt(1 + (1 - e2) * t * t))
    # Output: (lon, lat) in degrees; the computed height H is discarded.
    return (180 * L / math.pi , 180 * B / math.pi)
def createTables(cur):
    """Create the vody.sqlite schema; a no-op for tables that already exist.

    Entity tables (Nodes, Lines, Areas, Relations, Dibavod) plus the link
    tables attaching ordered nodes to lines/areas and ordered lines/areas
    to relations.
    """
    ddl = (
        """CREATE TABLE IF NOT EXISTS Nodes(ID_NODE INTEGER PRIMARY KEY AUTOINCREMENT,
           X INTEGER NOT NULL, Y INTEGER NOT NULL)""",
        """CREATE TABLE IF NOT EXISTS Lines(ID_LINE INTEGER PRIMARY KEY AUTOINCREMENT,
           ID_DIBAVOD INTEGER)""",
        """CREATE TABLE IF NOT EXISTS Areas(ID_AREA INTEGER PRIMARY KEY AUTOINCREMENT,
           ID_DIBAVOD INTEGER)""",
        """CREATE TABLE IF NOT EXISTS Relations(ID_RELATION INTEGER
           PRIMARY KEY AUTOINCREMENT)""",
        """CREATE TABLE IF NOT EXISTS Dibavod(ID_DIBAVOD INTEGER
           PRIMARY KEY AUTOINCREMENT,
           NAME TEXT, OBJ_ID INTEGER NOT NULL)""",
        """CREATE TABLE IF NOT EXISTS LineNodes(ID_LINE INTEGER,
           ID_NODE INTEGER, SEQ INTEGER)""",
        """CREATE TABLE IF NOT EXISTS LineRelations(ID_RELATION INTEGER,
           ID_LINE INTEGER, SEQ INTEGER)""",
        """CREATE TABLE IF NOT EXISTS AreaNodes(ID_AREA INTEGER,
           ID_NODE INTEGER, SEQ INTEGER)""",
        """CREATE TABLE IF NOT EXISTS AreaRelations(ID_RELATION INTEGER,
           ID_AREA INTEGER, SEQ INTEGER)""",
    )
    for statement in ddl:
        cur.execute(statement)
def importVody(prefix, nameColumn, idColumn):
    """Import one dibavod shapefile layer into vody.sqlite.

    prefix     -- base name of the layer under data/ ('.shp' geometry,
                  '.dbf' attributes).
    nameColumn -- DBF column holding the feature name.
    idColumn   -- DBF column holding the dibavod feature id.

    Geometry is converted from JTSK to WGS-84, deduplicated through the
    global allNodes cache ((x, y) -> node id), and stored into the Lines
    or Areas tables; DBF attributes are then joined back by record order.
    """
    global allNodes
    con = sqlite3.connect("vody.sqlite")
    con.isolation_level = None  # explicit BEGIN/COMMIT below
    cur = con.cursor()
    createTables(cur)
    cur.execute("BEGIN")
    total = 0
    allLines = []   # per shapefile record: list of created line/area ids
    lastTyp = None  # layer must contain a single shape type
    for (number, typ, obj) in reader(os.path.join('data', prefix + '.shp')):
        total += 1
        if typ in (POLYLINE_SHAPE, POLYGON_SHAPE):
            if typ == POLYLINE_SHAPE:
                tablePrefix = "Line"
            elif typ == POLYGON_SHAPE:
                tablePrefix = "Area"
            if lastTyp is None:
                lastTyp = typ
            assert lastTyp == typ
            lineIDs = []
            for line in obj:
                nodeIDs = []
                if typ == POLYGON_SHAPE:
                    # Shapefile rings repeat the first point at the end; drop it.
                    line = line[:-1]
                for pt in line:
                    # Shapefile stores negated JTSK coordinates; convert to
                    # WGS-84 and round to integer 1e-7 degrees.
                    pt2 = jtsk2wgs84(-pt[1], -pt[0])
                    pt2 = (int(pt2[0] * 10000000 + 0.5), int(pt2[1] * 10000000 + 0.5))
                    if pt2 in allNodes:
                        nodeID = allNodes[pt2]  # reuse shared node
                    else:
                        cur.execute("insert into Nodes(X, Y) values (?, ?)", pt2)
                        nodeID = cur.lastrowid
                        allNodes[pt2] = nodeID
                    # Skip consecutive duplicates produced by rounding.
                    if len(nodeIDs) > 0 and nodeID == nodeIDs[-1]:
                        pass
                    else:
                        nodeIDs.append(nodeID)
                cur.execute("insert into %ss(ID_DIBAVOD) values (NULL)" % tablePrefix)
                lineID = cur.lastrowid
                lineIDs.append(lineID)
                for i, nodeID in enumerate(nodeIDs):
                    cur.execute("insert into %sNodes(ID_%s, ID_NODE, SEQ) values (?, ?, ?)" % (tablePrefix, tablePrefix),
                                (lineID, nodeID, i + 1))
            allLines.append(lineIDs)
            if len(obj) > 1:
                # Multi-part geometry: group the parts into a relation.
                print "Relations: ", len(obj)
                cur.execute("insert into Relations VALUES(NULL)")
                relationID = cur.lastrowid
                for i, lineID in enumerate(lineIDs):
                    cur.execute("insert into %sRelations(ID_RELATION, ID_%s, SEQ) values (?, ?, ?)" % (tablePrefix, tablePrefix),
                                (relationID, lineID, i + 1))
            # break
        else:
            raise Exception('Unknown shape')
        #if total >= 10:
        #    break
        #break
        if total % 200 == 0:
            print total  # progress indicator
    print total
    # Attach DBF attributes to the geometry imported above, by record order.
    f = open(os.path.join('data', prefix + '.dbf'), "rb")
    db = dbf.dbfreader(f)
    fieldnames = db.next()
    fieldspecs = db.next()
    inazev = fieldnames.index(nameColumn)
    iid = fieldnames.index(idColumn)
    i = 0
    for row in db:
        n = unicode(row[inazev], 'cp1250').strip()
        did = long(row[iid])
        if len(n) == 0:
            n = None  # empty names become NULL
        cur.execute("INSERT INTO Dibavod(NAME, OBJ_ID) VALUES (?, ?)",
                    (n, did))
        idDibavod = cur.lastrowid
        # NOTE(review): tablePrefix here is whatever the last loop iteration
        # set; correct only because the whole layer has one shape type.
        for line in allLines[i]:
            cur.execute("UPDATE %ss SET ID_DIBAVOD=? WHERE ID_%s=?" % (tablePrefix, tablePrefix),
                        (idDibavod, line))
        i += 1
        if i % 200 == 0:
            print i
        if i == len(allLines):
            break
    f.close()
    cur.execute("COMMIT")
    con.close()
if __name__ == '__main__':
    # Shared (x, y) -> node-id cache so nodes can be reused across layers.
    allNodes = {}
    importVody('A02_Vodni_tok_JU', 'NAZ_TOK', 'UTOKJ_ID')
    #importVody('A05_Vodni_nadrze', 'NAZ_NA', 'NADR_GID')
#70402
#71922 - 2341 + 821
import sqlite3, os.path
# Export bounding box in degrees * 1e7 (the unit Nodes.X/Y are stored in).
lonFrom=14.3 * 10000000
lonTo=14.6 * 10000000
latFrom=49.7 * 10000000
latTo=50.0 * 10000000
# WHERE clause limiting node queries to the box; used by findWaysAndRelsInRect.
whereSql = "WHERE X > %.4f AND X < %.4f AND Y > %.4f AND Y < %.4f" % (lonFrom, lonTo, latFrom, latTo)
#whereSql = ""
# Ids of shared ("fixed") nodes already emitted, so each is written only once.
fixedNodesInOutput = set()
def polyAreaWS(polygon):
    """Signed area of a 2-D polygon (shoelace formula).

    polygon -- sequence of (x, y) vertices; an implicit edge closes the
               last vertex back to the first.

    Returns a float: positive for counter-clockwise winding, negative for
    clockwise, and 0 for degenerate input (fewer than 3 vertices).
    """
    if len(polygon) < 3:
        return 0
    area = 0.0
    j = len(polygon) - 1  # previous vertex index, wraps around
    for i in range(len(polygon)):  # range, not Python-2-only xrange
        area += float(polygon[j][0]) * polygon[i][1] - float(polygon[i][0]) * polygon[j][1]
        j = i
    return area / 2.0
def isClockwise(polygon):
    """True when the polygon winds clockwise (negative shoelace area)."""
    return polyAreaWS(polygon) < 0
def isPolyClockwise(idPoly):
    """Load area idPoly's ring from the DB (global con) and test its winding.

    Relies on con.row_factory returning dict-like rows.
    """
    cur = con.cursor()
    poly = []
    for row in cur.execute("""SELECT X, Y FROM AreaNodes
        INNER JOIN Nodes ON AreaNodes.ID_NODE=Nodes.ID_NODE
        WHERE ID_AREA=?
        ORDER BY AreaNodes.SEQ""", (idPoly,)):
        poly.append((row['X'], row['Y']))
    return isClockwise(poly)
def ex(cur, sql):
    # Log-and-execute helper: echoes the SQL to stdout before running it.
    print sql
    cur.execute(sql)
def createIndex(table, column):
    # Create index i_<table><column> on table(column) if it does not exist yet.
    cur = con.cursor()
    ex(cur, """CREATE INDEX IF NOT EXISTS i_%s%s ON %s (%s)""" % \
        (table, column, table, column))
def getWaysNodes(wayGroup, area):
    """Fetch the ordered node rows for every way id in wayGroup.

    wayGroup -- iterable of integer line/area ids (interpolated into SQL,
                safe only because they are ints).
    area     -- True for Areas, False for Lines.

    Returns {way_id: [row, ...]} where each row dict has ID_WAY, ID_NODE,
    and X/Y converted in place from 1e-7-degree ints to float degrees.
    Grouping relies on the ORDER BY keeping each way's rows contiguous.
    """
    cur = con.cursor()
    typ = area and "AREA" or "LINE"
    waysNodes = {}
    lastWay = None
    for row in cur.execute("""SELECT ID_%s AS ID_WAY, Nodes.ID_NODE, X, Y FROM %sNodes
        INNER JOIN Nodes ON %sNodes.ID_NODE=Nodes.ID_NODE
        WHERE ID_%s IN (%s) ORDER BY ID_%s, SEQ""" % (typ, typ, typ, typ,
        ",".join(map(lambda x: str(x), wayGroup)), typ)):
        if lastWay != row['ID_WAY']:
            # First row of the next way: start a new node list.
            nodes = []
            waysNodes[row['ID_WAY']] = nodes
            lastWay = row['ID_WAY']
        # Convert stored integer coordinates to degrees (mutates the row dict).
        row['X'] /= 10000000.0
        row['Y'] /= 10000000.0
        nodes.append(row)
    return waysNodes
def splitBy(array, groupSize):
    """Yield consecutive chunks of *array*, each at most *groupSize* long.

    Accepts any iterable; the final chunk may be shorter.
    """
    chunk = []
    for item in array:
        chunk.append(item)
        if len(chunk) >= groupSize:
            yield chunk
            chunk = []
    if chunk:
        yield chunk
def getWays(ways, area):
    """Yield (way_row, node_rows) for every line/area id in *ways*.

    Queries in batches of 1000 ids (SQLite IN-list limit headroom).
    way_row carries ID_WAY, NAME and OBJ_ID from the Dibavod join; the
    asserts require every requested way to exist and have attributes.
    """
    cur = con.cursor()
    typ = area and "Area" or "Line"
    for wayGroup in splitBy(ways, 1000):
        resultWays = {}
        for row in cur.execute("""SELECT %ss.ID_%s AS ID_WAY, NAME, OBJ_ID FROM %ss
            INNER JOIN Dibavod ON Dibavod.ID_DIBAVOD=%ss.ID_DIBAVOD
            WHERE ID_WAY IN (%s)""" % (typ, typ, typ, typ,
            ",".join(map(lambda x: str(x), wayGroup)))):
            resultWays[row['ID_WAY']] = row
        resultNodes = getWaysNodes(wayGroup, area)
        # Every requested id must resolve to both attributes and nodes.
        assert len(resultNodes) == len(wayGroup)
        assert len(resultNodes) == len(resultWays)
        for k in resultNodes.keys():
            yield (resultWays[k], resultNodes[k])
def dumpWays(prefix, idsLines, tags, area, idOffset):
    """Write ways (and their nodes) as OSM 0.5 XML with negative ids.

    prefix   -- an open file object (single-file mode: caller writes the
                <osm> header/footer) or a string prefix (split mode: files
                named <prefix>_NNN.xml are opened and closed here, rotated
                after ~10 MB).
    idsLines -- collection of line/area ids to dump (len() is required).
    tags     -- dict of tags added to every way.
    area     -- True to close each way back to its first node.
    idOffset -- added to way ids (keeps line and area ids disjoint in output).

    Uses globals fixedNodes / fixedNodesInOutput so nodes shared between
    ways are emitted only once across all calls.
    """
    total = 0
    outIndex = 1
    if type(prefix) is file:
        split = False
        out = prefix
    else:
        split = True
        out = None
    lastPercent = -1
    for (way, nodes) in getWays(idsLines, area):
        if out is None:
            # Split mode: open the next output part and write the header.
            out = open("%s_%03d.xml" % (prefix, outIndex), "wb")
            outIndex += 1
            print >> out, """<?xml version='1.0' encoding='UTF-8'?>
<osm version='0.5' generator='vody'>"""
        total += 1
        # Progress report (integer percent, printed when it changes).
        percent = 100 * total / len(idsLines)
        if percent != lastPercent:
            print '%d%%' % percent, total, '/', len(idsLines)
            lastPercent = percent
        # Print nodes
        if len(nodes) <= 1:
            print "Warning, line has less than 2 points: ", way['ID_WAY']
            continue
        for node in nodes:
            if node['ID_NODE'] in fixedNodes:
                # Shared node: emit at most once across the whole export.
                if node['ID_NODE'] in fixedNodesInOutput:
                    continue
                fixedNodesInOutput.add(node['ID_NODE'])
            print >> out, """<node id="-%d" lon="%.9f" lat="%.9f" />""" \
                % (node['ID_NODE'], node['X'], node['Y'])
        # Print way
        print >> out, """<way id="-%d">""" % (way['ID_WAY'] + idOffset)
        for node in nodes:
            print >> out, """<nd ref="-%d" />""" % node['ID_NODE']
        if area:
            # Close the ring back to the first node.
            print >> out, """<nd ref="-%d" />""" % nodes[0]['ID_NODE']
        # Print tags
        # NOTE(review): tag/name values are not XML-escaped; names containing
        # &, < or " would produce invalid XML.
        for k, v in tags.iteritems():
            print >> out, """<tag k="%s" v="%s" />""" % (k, v)
        if way['NAME'] is not None:
            print >> out, """<tag k="name" v="%s" />""" % way['NAME'].encode('utf8')
        if way['OBJ_ID'] is not None:
            print >> out, """<tag k="dibavod:id" v="%d" />""" % way['OBJ_ID']
        print >> out, """</way>"""
        if split and out.tell() > 10000000:
            # Rotate after ~10 MB; a new part is opened on the next way.
            print >> out, """</osm>"""
            out.close()
            out = None
    if split and out is not None:
        # Close the last part (single-file mode: footer is the caller's job).
        print >> out, """</osm>"""
        out.close()
        out = None
def dumpRels(out, idsRels, tags):
    """Write multipolygon relations for area relations as OSM 0.5 XML.

    out     -- open output file (header/footer are the caller's job).
    idsRels -- relation ids to dump.
    tags    -- dict of tags added to every relation.

    Member roles come from ring winding: clockwise = outer, otherwise
    inner.  The 1000000 offset must match the idOffset used when the
    member areas were dumped by dumpWays.
    """
    cur = con.cursor()
    for rel in idsRels:
        # Print relation
        print >> out, """<relation id="-%d">""" % rel
        for row in cur.execute("""SELECT ID_RELATION, ID_AREA FROM AreaRelations
            WHERE ID_RELATION=?
            ORDER BY SEQ""", (rel,)):
            # Print members
            if isPolyClockwise(row['ID_AREA']):
                role = "outer"
            else:
                role = "inner"
            print >> out, """<member type="way" ref="-%d" role="%s" />""" % (1000000 + row['ID_AREA'], role)
        # Print tags
        for k, v in tags.iteritems():
            print >> out, """<tag k="%s" v="%s" />""" % (k, v)
        print >> out, """</relation>"""
def dict_factory(cursor, row):
    """sqlite3 row factory: map a result row to {column_name: value}."""
    names = [col[0] for col in cursor.description]
    return dict(zip(names, row))
def findWaysAndRelsInRect(typ):
    """Find ways and relations with at least one node inside the bbox.

    typ -- "line" or "area" (interpolated into table names; SQLite table
           names are case-insensitive).

    Returns (idsLines, idsInRels, idsRels):
      idsLines  -- way ids in the bbox that are NOT members of a relation,
      idsInRels -- all member way ids of the matched relations,
      idsRels   -- relation ids touching the bbox.
    Uses globals con and whereSql.
    """
    cur = con.cursor()
    col = ("ID_%s" % typ).upper()
    # Ways with a node inside the bounding box.
    idsLines = set()
    for i in cur.execute("""SELECT DISTINCT ID_%s FROM %sNodes
        INNER JOIN Nodes on %sNodes.ID_NODE=Nodes.ID_NODE
        %s
        """ % (typ, typ, typ, whereSql)):
        idsLines.add(i[col])
    # Relations having a member way with a node inside the box.
    idsRels = set()
    for i in cur.execute("""SELECT DISTINCT ID_RELATION FROM %sNodes
        INNER JOIN Nodes on %sNodes.ID_NODE=Nodes.ID_NODE
        INNER JOIN %sRelations on %sNodes.ID_%s=%sRelations.ID_%s
        %s
        """ % (typ, typ, typ, typ, typ, typ, typ, whereSql)):
        idsRels.add(i['ID_RELATION'])
    # All members of those relations, even ones whose nodes are outside.
    idsInRels = set()
    if len(idsRels) > 0:
        for i in cur.execute("""SELECT DISTINCT %s FROM %sRelations
            WHERE ID_RELATION IN(%s)""" % (col, typ,
            ",".join(map(lambda x: str(x), idsRels)))):
            idsInRels.add(i[col])
    # Relation members are dumped with their relation, not as plain ways.
    idsLines = idsLines.difference(idsInRels)
    return (idsLines, idsInRels, idsRels)
def findFixedNodes():
    """Populate the global fixedNodes set with ids of shared nodes.

    A node is "fixed" when it is referenced more than once in total across
    AreaNodes and LineNodes (the 1/2 tag keeps the UNION from collapsing
    equal counts from the two tables).  Results are cached in the Fixed
    table across runs.
    """
    global fixedNodes
    fixedNodes = set()
    cur = con.cursor()
    cur.execute("""CREATE TABLE IF NOT EXISTS Fixed AS
        SELECT * FROM (SELECT SUM(p) as pocet, ID_NODE FROM
        (SELECT ID_NODE, count(*) AS p, 1 FROM AreaNodes GROUP BY ID_NODE
        UNION
        SELECT ID_NODE, count(*) AS p, 2 FROM LineNodes GROUP BY ID_NODE)
        GROUP BY ID_NODE
        ) WHERE pocet > 1""")
    for r in cur.execute("SELECT * FROM Fixed"):
        fixedNodes.add(r['ID_NODE'])
    print 'fixedNodes: ', len(fixedNodes)
def main():
    """Export vody.sqlite to OSM 0.5 XML.

    Two modes, chosen by the dumpSeparated flag:
      True  -- lines and areas go to size-rotated part files; areas that
               belong to multipolygon relations go to one extra file.
      False -- everything goes into a single vody.xml.
    """
    global con
    con = sqlite3.connect("vody.sqlite")
    con.row_factory = dict_factory  # queries below address columns by name
    con.isolation_level = None
    cur = con.cursor()
    cur.execute("BEGIN")
    # Indexes that make the export joins usable.
    createIndex("LineNodes", "ID_LINE")
    createIndex("AreaNodes", "ID_AREA")
    createIndex("LineNodes", "ID_NODE")
    createIndex("AreaNodes", "ID_NODE")
    createIndex("LineRelations", "ID_LINE")
    createIndex("AreaRelations", "ID_AREA")
    createIndex("Lines", "ID_DIBAVOD")
    createIndex("Areas", "ID_DIBAVOD")
    findFixedNodes()
    dumpSeparated = False
    if dumpSeparated:
        (idsLines, idsInRels, idsRels) = findWaysAndRelsInRect("line")
        # We do not want lines without relations only, so union them back.
        idsLines = idsLines.union(idsInRels)
        dumpWays("lines", idsLines, {'waterway' : 'river', 'source' : 'dibavod'}, False, 0)
        # At first we dump areas that do not have a relation.
        (idsAreas, idsInRels, idsRels) = findWaysAndRelsInRect("area")
        dumpWays("areas", idsAreas, {'natural' : 'water', 'source' : 'dibavod'}, True, 1000000)
        # At last we dump areas with a relation into one file.
        out = open("areasWithRelation.xml", "wb")
        print >> out, """<?xml version='1.0' encoding='UTF-8'?>
<osm version='0.5' generator='vody'>"""
        dumpWays(out, idsInRels, {'natural' : 'water', 'source' : 'dibavod'}, True, 1000000)
        dumpRels(out, idsRels, {'source' : 'dibavod', 'type' : 'multipolygon'})
        print >> out, """</osm>"""
        out.close()
    else:
        # Single-file export: lines, plain areas, relation areas, relations.
        out = open("vody.xml", "wb")
        print >> out, """<?xml version='1.0' encoding='UTF-8'?>
<osm version='0.5' generator='vody'>"""
        (idsLines, idsInRels, idsRels) = findWaysAndRelsInRect("line")
        idsLines = idsLines.union(idsInRels)
        dumpWays(out, idsLines, {'waterway' : 'river', 'source' : 'dibavod'}, False, 0)
        (idsAreas, idsInRels, idsRels) = findWaysAndRelsInRect("area")
        dumpWays(out, idsAreas, {'natural' : 'water', 'source' : 'dibavod'}, True, 1000000)
        dumpWays(out, idsInRels, {'natural' : 'water', 'source' : 'dibavod'}, True, 1000000)
        dumpRels(out, idsRels, {'source' : 'dibavod', 'type' : 'multipolygon'})
        print >> out, """</osm>"""
        out.close()
    cur.execute("COMMIT")  # persists the Fixed table and indexes
    con.close()
# Script entry point.
if __name__ == '__main__':
    main()
_______________________________________________
Talk-cz mailing list
Talk-cz@openstreetmap.org
http://lists.openstreetmap.org/listinfo/talk-cz