                continue
            elif hole.intersects(polygons[place_id]):
                polygons[place_id] = polygons[place_id].union(hole)
                changed = True
            if changed:
                break
        if not changed:
            unassigned.append(hole)
    retries += 1
    if len(unassigned) == len(holes):
        # give up
        break
    holes = unassigned
print("%d retried, %d unassigned." % (retries, len(unassigned)), file=sys.stderr)
hoodIndex = Rtree()
print("Buffering polygons.", file=sys.stderr)
for place_id, polygon in polygons.items():
    # rebuild each polygon from its exterior ring only, dropping interiors
    if type(polygon) is Polygon:
        polygon = Polygon(polygon.exterior.coords)
    else:
        bits = []
        for p in polygon.geoms:
            if type(p) is Polygon:
                bits.append(Polygon(p.exterior.coords))
        polygon = MultiPolygon(bits)
    # buffer(0) repairs self-intersections; index the result by its bounds
    polygons[place_id] = polygon.buffer(0)
    hoodIndex.insert(place_id, polygons[place_id].bounds)
print >>sys.stderr, "Retconning blocks to shapes."
cur.execute("""select geom, geoid10 FROM tabblock10 tb WHERE statefp10 = %s AND countyfp10 = %s AND blockce10 NOT LIKE '0%%'""", (statefp10, countyfp10))
if do_cache:
print >>sys.stderr, "Caching points, blocks, and names ..."
pickle.dump((names, blocks, places), file(areaid + ".cache", "w"), -1)
blocks = list(map(asShape, blocks))
points = []
place_list = set()
count = 0
for place_id, pts in places.items():
    count += 1
    print("\rPreparing %d of %d places..." % (count, len(places)), end="", file=sys.stderr)
    for pt in pts:
        # pt + pt turns an (x, y) pair into a zero-area (x, y, x, y) bounding box
        place_list.add((len(points), pt + pt, None))
        points.append((place_id, Point(pt)))
print("Indexing...", end="", file=sys.stderr)
# bulk-load the Rtree from the (id, bounds, obj) tuples
index = Rtree(place_list)
print("Done.", file=sys.stderr)
def score_block(polygon):
    centroid = polygon.centroid
    #prepared = prep(polygon)
    score = {}
    outside_samples = 0
    # sample the SAMPLE_SIZE labeled points nearest the block's centroid
    for item in index.nearest((centroid.x, centroid.y), num_results=SAMPLE_SIZE):
        place_id, point = points[item]
        score.setdefault(place_id, 0.0)
        #if prepared.contains(point):
        #    score[place_id] += 1.0
        #else:
        # inverse-distance weighting, clamped so very close points can't blow up
        score[place_id] += 1.0 / math.sqrt(max(polygon.distance(point) * SCALE_FACTOR, 1.0))
        outside_samples += 1
    return sorted(((sc, place_id) for place_id, sc in score.items()), reverse=True)
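# A note on the query in score_block: rtree accepts a bare (x, y) pair as a
# degenerate bounding box, so index.nearest((centroid.x, centroid.y),
# num_results=k) is a k-nearest-neighbour lookup around a point. Sketch:
from rtree import Rtree

knn = Rtree()
for i, (x, y) in enumerate([(0.0, 0.0), (10.0, 10.0), (1.0, 1.0)]):
    knn.insert(i, (x, y, x, y))
print(list(knn.nearest((0.0, 0.0), num_results=2)))  # the two closest ids: [0, 2]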
def __init__(self, dbname, overwrite=False, rtree_index=True):
    self.dbname = dbname
    if overwrite:
        try:
            os.remove(dbname)
        except OSError:
            pass
    self.conn = sqlite3.connect(dbname)
    if rtree_index:
        # file-backed index: opens or creates dbname.idx / dbname.dat
        self.index = Rtree(dbname)
    else:
        self.index = None
    if overwrite:
        self.setup()
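# Note: Rtree(dbname) above opens a file-backed index, creating or reusing
# dbname + ".idx" and dbname + ".dat" next to the sqlite file, so the index
# persists across runs. A minimal round-trip sketch (file name illustrative):
from rtree import Rtree

store = Rtree("example_store")          # creates example_store.idx / .dat
store.insert(1, (0.0, 0.0, 1.0, 1.0))
del store                               # flushes pages to disk
store = Rtree("example_store")          # reopen the same index
print(list(store.intersection((0.5, 0.5, 0.6, 0.6))))  # -> [1]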
import itertools
import pickle
import typing
from typing import Tuple, TypeVar, Generic, Iterator
import rtree
from meridian.record import Record
T = TypeVar("T", bound=Record)
class FastRTree(rtree.Rtree):
    """A faster Rtree which pickles stored objects with the highest protocol."""

    def dumps(self, obj):
        # protocol -1 selects pickle.HIGHEST_PROTOCOL
        return pickle.dumps(obj, -1)
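# Usage sketch: rtree calls dumps() whenever an entry carries an `obj`
# payload, so FastRTree mainly speeds up object (de)serialization.
fast = FastRTree()
fast.insert(0, (0.0, 0.0, 1.0, 1.0), obj={"name": "a"})
hit = next(fast.nearest((0.0, 0.0, 0.0, 0.0), 1, objects=True))
print(hit.id, hit.object)  # -> 0 {'name': 'a'}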
def _check_bounds(query: typing.Any):
    """Ensure the input object has a `bounds` attribute."""
    if not hasattr(query, "bounds"):
        raise AttributeError(
            "Query argument must implement a bounds "
            "property returning (xmin, ymin, xmax, ymax)"
        )
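# Usage sketch: anything exposing a .bounds attribute (e.g. a shapely
# geometry or a meridian Record) passes; a bare tuple does not.
from shapely.geometry import Point

_check_bounds(Point(0, 0).buffer(1.0))   # ok: shapely geometries have .bounds
try:
    _check_bounds((0.0, 0.0, 1.0, 1.0))  # tuples have no .bounds
except AttributeError as exc:
    print(exc)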
def r_tree(self):
    if not hasattr(self, '_r_tree'):
        # define properties for an R*-tree matching this dataset's dimension
        p = r_treeIndex.Property()
        p.dimension = self.dim()
        p.variant = r_treeIndex.RT_Star
        # duplicating the coordinate indices turns each point into a
        # zero-volume (min..., max...) bounding box
        index = np.concatenate((range(self.dim()), range(self.dim())))

        # Generator provides the required (id, bounds, obj) point format
        def gen():
            for id, coord in self:
                yield (id, coord[index], id)

        self._r_tree = Rtree(gen(), properties=p)
    return self._r_tree
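# The same pattern in isolation: bulk-load an R*-tree over d-dimensional
# points by duplicating each coordinate vector into a zero-volume box.
# Self-contained sketch with 3-D points (data is illustrative):
import numpy as np
from rtree import Rtree
from rtree import index as r_treeIndex

coords = np.random.rand(100, 3)
p = r_treeIndex.Property()
p.dimension = 3
p.variant = r_treeIndex.RT_Star
dup = np.concatenate((range(3), range(3)))  # (x, y, z) -> (x, y, z, x, y, z)
star = Rtree(((i, c[dup], i) for i, c in enumerate(coords)), properties=p)
print(next(star.nearest(tuple(coords[0][dup]), 1)))  # -> 0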
Parameters
----------
bldgs: gpd.GeoDataFrame
Data on buildings.
blockface: gpd.GeoSeries
A single blockface.
step_size: float
The step size, which controls the accuracy of the assessment (at the cost of speed).
Returns
-------
gpd.GeoDataFrame
Frontage match data.
"""
# TODO: use a smarter search strategy than simple iterative search
index = rtree.Rtree()
if len(bldgs) == 0:
    return gpd.GeoDataFrame()
for idx, bldg in bldgs.iterrows():
    index.insert(idx, bldg.geometry.bounds)

bldg_frontage_points = dict()
search_space = np.arange(0, 1, step_size)
next_search_space = []

while len(search_space) > 0:
    for offset in search_space:
        # walk along the blockface, snapping each sample to the nearest building
        search_point = blockface.geometry.interpolate(offset, normalized=True)
        nearest_bldg = list(index.nearest(search_point.bounds, 1))[0]
        bldg_frontage_points[str(offset)[:6]] = nearest_bldg
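# The key call above: a shapely Point's .bounds is already a degenerate
# (x, y, x, y) box, so index.nearest(search_point.bounds, 1) yields the id
# of the building whose bounding box lies closest. Isolated sketch:
import rtree
from shapely.geometry import Point, box

demo = rtree.Rtree()
demo.insert(0, box(0, 0, 1, 1).bounds)
demo.insert(1, box(5, 5, 6, 6).bounds)
print(list(demo.nearest(Point(0.5, 2.0).bounds, 1)))  # -> [0]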
and then exact polygon queries for a final result.
Parameters
----------
polygons : (n,) shapely.geometry.Polygon
Polygons which only have exteriors and may overlap
Returns
-------
roots : (m,) int
Index of polygons which are root
contains : networkx.DiGraph
Edges indicate a polygon is
contained by another polygon
"""
tree = Rtree()
# nodes are indexes in polygons
contains = nx.DiGraph()
for i, polygon in enumerate(polygons):
    # if a polygon is None it means creation
    # failed due to weird geometry so ignore it
    if polygon is None or len(polygon.bounds) != 4:
        continue
    # insert polygon bounds into rtree
    tree.insert(i, polygon.bounds)
    # make sure every valid polygon has a node
    contains.add_node(i)
# loop through every polygon
for i in contains.nodes():
    polygon = polygons[i]
    # we first query for bounding box intersections from the R-tree
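# The two-phase pattern the function above builds toward, in isolation:
# .intersection() supplies cheap bounding-box candidates, and an exact
# shapely .contains() test confirms true containment. Illustrative sketch:
from rtree import Rtree
from shapely.geometry import Polygon

polys = [Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),   # outer shell
         Polygon([(1, 1), (2, 1), (2, 2), (1, 2)])]   # sits inside the shell
rt = Rtree()
for i, poly in enumerate(polys):
    rt.insert(i, poly.bounds)

i = 0
candidates = [j for j in rt.intersection(polys[i].bounds) if j != i]
inside = [j for j in candidates if polys[i].contains(polys[j])]
print(inside)  # -> [1]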
def insert(self, box, name=None):
    "makes a new node"
    target = self.root().choose_leaf(box)
    target.children.append(Rtree(target, box, name))
    target.merge_up(box)
    if len(target.children) > target.bigm:
        target.divide_children()

def merge_up(self, box):
    # body omitted from this excerpt; presumably grows ancestor boxes to cover `box`
    ...

def divide_children(self):
    children2 = [c for c in self.children]
    self.children = []
    while children2:
        # start a new parent node around an arbitrary child...
        c1 = children2.pop()
        newp = Rtree(self, c1.box)
        newp.children.append(c1)
        c1.parent = newp
        self.children.append(newp)
        if not children2:
            break
        # ...then pair it with the child that enlarges the new box the least
        c2 = smallest_merge(children2, newp.box)
        newp.children.append(c2)
        children2.remove(c2)
        c2.parent = newp
        newp.box = merge(c1.box, c2.box)
    self.box = merge(*(c.box for c in self.children))
    self.merge_up(self.box)
# Leaves are too small, but grow soon. Probably.
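# divide_children relies on two helpers not shown in this excerpt. A hedged
# sketch of plausible definitions, treating boxes as (minx, miny, maxx, maxy)
# tuples (these are assumptions, not the original code):
def merge(*boxes):
    """Smallest box covering all of the given boxes."""
    return (min(b[0] for b in boxes), min(b[1] for b in boxes),
            max(b[2] for b in boxes), max(b[3] for b in boxes))

def _area(box):
    return (box[2] - box[0]) * (box[3] - box[1])

def smallest_merge(nodes, box):
    """Node whose box, merged with `box`, gives the smallest combined area."""
    return min(nodes, key=lambda n: _area(merge(n.box, box)))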