-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathcreate_data_set.py
More file actions
185 lines (154 loc) · 6.74 KB
/
create_data_set.py
File metadata and controls
185 lines (154 loc) · 6.74 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
"""
Create the building energy-label prediction data set.

Queries a PostgreSQL database for building records, vectorizes each record
(geometry, postal code, purposes, dates, ...) and writes compressed NumPy
archives for train / validation / test / unit-test splits.
"""
import argparse
import gc
import os
import sys
from datetime import datetime, timedelta
from time import time
import psycopg2
import numpy as np
from psycopg2.extras import DictCursor
from sklearn.model_selection import train_test_split
# from matplotlib import pyplot as plt
from deep_geometry.vectorizer import vectorize_wkt
from tqdm import tqdm

# Version string is baked into the output file names so data sets from
# different script revisions never overwrite each other.
SCRIPT_VERSION = '2.0'
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
TRAIN_DATA_FILE = 'building_energy_train_v{}'.format(SCRIPT_VERSION)
VAL_DATA_FILE = 'building_energy_val_v{}'.format(SCRIPT_VERSION)
TEST_DATA_FILE = 'building_energy_test_v{}'.format(SCRIPT_VERSION)
UNIT_TEST_DATA_FILE = 'building_energy_unit_test_v{}'.format(SCRIPT_VERSION)
SCRIPT_START = time()

# NOTE: arguments are parsed at import time, so importing this module
# requires the mandatory flags to be present on the command line.
parser = argparse.ArgumentParser(description='Create energy label prediction data set')
parser.add_argument('-H', '--host', type=str, help='database host', required=True)
parser.add_argument('-P', '--port', type=str, help='database port')
parser.add_argument('-d', '--database', type=str, help='database name', required=True)
parser.add_argument('-u', '--user', type=str, help='database user', required=True)
parser.add_argument('-p', '--password', type=str, help='database password')
parser.add_argument('-o', '--output_folder', type=str, help='output folder', required=True)
args = parser.parse_args()

# Dutch (BAG) building purpose names -> English equivalents used in PURPOSES.
purpose_to_english = {
    "woonfunctie": "residential",
    "bijeenkomstfunctie": "gathering",
    "celfunctie": "cell",
    "gezondheidszorgfunctie": "health",
    "industriefunctie": "industry",
    "kantoorfunctie": "office",
    "logiesfunctie": "lodging",
    "onderwijsfunctie": "education",
    "sportfunctie": "sports",
    "winkelfunctie": "shopping",
    "overige gebruiksfunctie": "other",
}

# Character set for one-hot encoding postal codes and house-number additions.
VOCABULARY = '0123456789abcdefghijklmnopqrstuvwxyz'

# Fixed ordering of purposes; index into this list defines the multi-hot slot.
PURPOSES = [
    "residential",
    "gathering",
    "cell",
    "health",
    "industry",
    "office",
    "lodging",
    "education",
    "sports",
    "shopping",
    "other",
]

# Energy label classes, best to worst; index defines the class id.
ENERGY_CLASSES = ['A++', 'A+', 'A', 'B', 'C', 'D', 'E', 'F', 'G']
def get_data_from_db(cursor):
    """
    Vectorize every record yielded by a query-instantiated database cursor.

    Each row is copied into a plain dict and augmented with ``*_vec`` entries:
    numeric pass-throughs (house number, construction year), character-level
    one-hot encodings (house-number addition, postal code), a multi-hot
    purpose vector, WKT geometry vectors and calendar features for both dates.

    :param cursor: query-instantiated database cursor yielding dict-like rows
    :return: tuple (training_data, labels); each label dict carries the energy
             performance index, the label string and its ENERGY_CLASSES index
    """
    training_data, labels = [], []
    for record in tqdm(cursor, total=cursor.rowcount):
        record = dict(record)
        # Translate Dutch purpose names so they match the PURPOSES list.
        record['purposes'] = [purpose_to_english[p] for p in record['purposes']]
        # Just duplicate for house_number and year of construction.
        record['house_number_vec'] = record['house_number']
        record['year_of_construction_vec'] = record['year_of_construction']
        # One-hot encoding for the house-number addition; a single all-zero
        # row stands in for "no addition" so downstream shapes stay 2-D.
        if record['house_number_addition']:
            hna = np.zeros(shape=(len(record['house_number_addition']), len(VOCABULARY)))
            for idx, char in enumerate(record['house_number_addition']):
                hna[idx, VOCABULARY.index(char.lower())] = 1.
        else:
            hna = np.zeros(shape=(1, len(VOCABULARY)))
        record['house_number_addition_vec'] = hna
        # 'Multi-hot' encoding for building purposes.
        # (Fixed: the comma previously sat inside the len() call as
        # len(PURPOSES,) -- same value by accident, but the intent is a
        # one-element shape tuple.)
        purposes = np.zeros(shape=(len(PURPOSES),))
        for purpose in record['purposes']:
            purposes[PURPOSES.index(purpose)] = 1.
        record['purposes_vec'] = purposes
        # Character-level one-hot vectorization of the postal code.
        # NOTE(review): VOCABULARY holds only [0-9a-z]; a postal code with a
        # space or punctuation would raise ValueError here -- presumably the
        # query already normalizes them. TODO confirm against the SQL.
        pc = np.zeros((len(record['postal_code']), len(VOCABULARY)))
        for idx, char in enumerate(record['postal_code']):
            pc[idx, VOCABULARY.index(char.lower())] = 1.
        record['postal_code_vec'] = pc
        # Building geometry vectorization (WKT -> geometry matrix).
        record['geometry_vec'] = vectorize_wkt(record['geometry_crs84'])
        # Only the x, y coordinates of the centroid point are kept.
        record['centroid_vec'] = vectorize_wkt(record['centroid_crs84'])[0, :2]
        # Vectorization of neighbouring buildings.
        record['neighbouring_buildings_vec'] = vectorize_wkt(record['neighbouring_buildings_crs84'])
        # Calendar features for both date columns.
        rd = record['recorded_date']
        record['recorded_date_vec'] = [rd.year, rd.month, rd.day, rd.weekday()]
        rgd = record['registration_date']
        record['registration_date_vec'] = [rgd.year, rgd.month, rgd.day, rgd.weekday()]
        training_data.append(record)
        labels.append({
            'energy_performance_index': record['energy_performance_index'],
            'energy_performance_label': record['energy_performance_label'],
            'energy_performance_vec': ENERGY_CLASSES.index(record['energy_performance_label'])
        })
    return training_data, labels
def main():
    """
    Build the data set end to end: connect, query, vectorize, split, save.

    Connects to PostgreSQL with the parsed command-line arguments, runs the
    query from ``epl_all_data.sql``, vectorizes the rows, splits 80/10/10
    into train/validation/test (plus a 100-sample unit-test slice) and saves
    each split as a compressed ``.npz`` in ``args.output_folder``.

    Exits with status 1 if the database connection cannot be established.
    """
    try:
        # args.password is None when -p was not supplied, which psycopg2
        # accepts for trust/peer authentication.
        # Fixed: --port was parsed but never passed to connect(), so a
        # non-default port was silently ignored.
        connection = psycopg2.connect(
            host=args.host, port=args.port, database=args.database,
            user=args.user, password=args.password,
            connect_timeout=3)
    except psycopg2.OperationalError as e:
        print('Unable to connect:', e)
        sys.exit(1)
    connection.set_client_encoding('utf-8')
    cursor = connection.cursor(cursor_factory=DictCursor)

    print('Constructing database query result, this will take a few minutes...')
    # Fixed: the query file was previously opened without ever being closed.
    with open('epl_all_data.sql', mode='r', encoding='utf-8') as sql_file:
        data_query = sql_file.read()
    cursor.execute(data_query)
    runtime = time() - SCRIPT_START
    print('Query executed in', timedelta(seconds=runtime))

    data, labels = get_data_from_db(cursor)
    # 80% train, 20% held out for validation + test.
    train_data, test_data, train_labels, test_labels = train_test_split(
        data, labels, test_size=0.2, shuffle=True)

    # Clean up and free some memory before building the large output arrays.
    connection.close()
    del data, labels, cursor, connection
    gc.collect()

    # Split the held-out 20% evenly into validation and test (10% each).
    val_data, test_data, val_labels, test_labels = train_test_split(
        test_data, test_labels, test_size=0.5)

    print('Saving unit test data')
    np.savez_compressed(os.path.join(args.output_folder, UNIT_TEST_DATA_FILE),
                        data=train_data[:100],
                        labels=train_labels[:100])
    print('Saving test data...')
    np.savez_compressed(os.path.join(args.output_folder, TEST_DATA_FILE),
                        data=test_data,
                        labels=test_labels)
    print('Saving validation data...')
    np.savez_compressed(os.path.join(args.output_folder, VAL_DATA_FILE),
                        data=val_data,
                        labels=val_labels)

    print('Saving training data...')
    # Strided slicing spreads the shuffled records evenly over the parts,
    # keeping each part file small enough to load independently.
    parts = 10
    for part in range(parts):
        np.savez_compressed(os.path.join(args.output_folder, TRAIN_DATA_FILE + '_part_' + str(part + 1)),
                            data=train_data[part::parts],
                            labels=train_labels[part::parts])
# Script entry point: build the data set, then report total wall-clock time
# (measured from module import, i.e. SCRIPT_START).
if __name__ == '__main__':
    main()
    runtime = time() - SCRIPT_START
    print('Done in', timedelta(seconds=runtime))