
Commit 9cbe402

Add parallel exec
1 parent c8445c3 commit 9cbe402

2 files changed: +52 / -32 lines changed


Dockerfile

Lines changed: 1 addition & 0 deletions

@@ -5,4 +5,5 @@ WORKDIR /usr/src/app
 COPY requirements.txt ./
 RUN pip install --no-cache-dir -r requirements.txt
 COPY scoutapm_exporter.py .
+COPY scout_data.py .
 CMD [ "python", "scoutapm_exporter.py" ]

scoutapm_exporter.py

Lines changed: 51 additions & 32 deletions

@@ -7,13 +7,16 @@
 from prometheus_client import start_http_server, Gauge
 from scout_data import ScoutAPMAPI
 import logging
+from concurrent.futures import ThreadPoolExecutor, as_completed

 base_url = "https://scoutapm.com"
 apikey = os.environ.get('scoutapm_apikey')
 scout_email = os.environ.get("scout_email")
 scout_password = os.environ.get("scout_password")

 monitored_apps = os.environ.get('monitored_apps','disabled').split(',')
+raw_metrics_enabled = bool(os.environ.get('raw_metrics_enabled', False))
+default_metrics_enabled = bool(os.environ.get('default_metrics_enabled', True))
 time_between_polls = os.environ.get("time_between_polls", 60)
 prefix = os.environ.get("metric_name_prefix", "scoutapm")
 port = os.environ.get("port", 8000)
@@ -54,6 +57,8 @@ def extract_metrics(metric_info):
     return dataset["summaries"][metric_name]

 def extract_last_value_metrics(app_id):
+    print(f"Extracting raw metrics for {app_id}")
+
     data = json.loads(raw_api.get_data(app_id=app_id))

     series_raw_metrics = {}
@@ -70,10 +75,24 @@ def extract_last_value_metrics(app_id):
             last_value = metric['data'][-1][1]
             series_raw_metrics[series_name][metric_name.lower()] = last_value
         except:
-            logging.error(f"Can't extract latest data of {series_name} -> {metric_name}")
+            logging.error(f"Can't extract latest data for app: {app_id} of {series_name} -> {metric_name}")

     return series_raw_metrics

+def process_metric(metric_info):
+    app_name, app_id, metric_name = metric_info
+    value = extract_metrics(metric_info)
+    if value is not None:
+        metrics[metric_name].labels(app_name=app_name).set(value)
+
+def process_raw_metric(metric_info):
+    app_name, app_id, metric_name = metric_info
+    values = extract_last_value_metrics(app_id=app_id)
+    for metric in values:
+        for component in values[metric]:
+            if values[metric][component] is not None:
+                metrics[metric_name].labels(app_name=app_name,component=component).set(values[metric_name][component])
+
 if __name__ == '__main__':
     print("Extracting apps")
     apps = scoutapm_call('/api/v0/apps')
@@ -83,42 +102,42 @@ def extract_last_value_metrics(app_id):
         if app['name'] not in monitored_apps and monitored_apps[0] != 'disabled':
             # We don't need to extract these metrics"
             continue
-        print(f"Extracting available metrics for {app}")
-
-        # result: {'name': 'MyPlex', 'id': 119}
-        available_metrics = scoutapm_call('/api/v0/apps/%d/metrics' % app['id'])
-        for metric in available_metrics['availableMetrics']:
-            data.append((app['name'], app['id'], metric))
-            if not metrics.get(metric):
-                metrics[metric] = Gauge("%s_%s" % (prefix,metric), metric, ['app_name'])
+        if default_metrics_enabled:
+            print(f"Extracting available metrics for {app}")
+
+            # result: {'name': 'MyPlex', 'id': 119}
+            available_metrics = scoutapm_call('/api/v0/apps/%d/metrics' % app['id'])
+            for metric in available_metrics['availableMetrics']:
+                data.append((app['name'], app['id'], metric))
+                if not metrics.get(metric):
+                    metrics[metric] = Gauge("%s_%s" % (prefix,metric), metric, ['app_name'])

         # Get raw metrics
-        raw_metrics = extract_last_value_metrics(app_id=app['id'])
+        if raw_metrics_enabled:
+            raw_metrics = extract_last_value_metrics(app_id=app['id'])

-        for metric in raw_metrics:
-            data_raw.append((app['name'], app['id'], metric))
-            if not metrics.get(metric):
-                metrics[metric] = Gauge("%s_%s" % (prefix,metric), metric, ['app_name','component'])
+            for metric in raw_metrics:
+                data_raw.append((app['name'], app['id'], metric))
+                if not metrics.get(metric):
+                    metrics[metric] = Gauge("%s_%s" % (prefix,metric), metric, ['app_name','component'])

     print("Starting HTTP Server on port %s" % port)
     # Start up the server to expose the metrics.
     start_http_server(int(port))

-    while True:
-        for metric_info in data:
-            app_name, app_id, metric_name = metric_info
-            value = extract_metrics(metric_info)
-            if value is not None:
-                metrics[metric_name].labels(app_name=app_name).set(value)
-
-        for metric_info in data_raw:
-            app_name, app_id, metric_name = metric_info
-            values = extract_last_value_metrics(app_id=app_id)
-            for metric in values:
-                for component in values[metric]:
-                    if values[metric][component] is not None:
-                        metrics[metric_name].labels(app_name=app_name,component=component).set(values[metric_name][component])
-
-        # We don't get data updated every seconds, so let's wait 1 minute between polls
-        print("Waiting %d seconds" % time_between_polls)
-        sleep(time_between_polls)
+    with ThreadPoolExecutor(max_workers=10) as executor:
+        while True:
+            # Parallelize the first loop
+            if raw_metrics_enabled:
+                futures = [executor.submit(process_raw_metric, metric_info) for metric_info in data_raw]
+                for future in as_completed(futures):
+                    future.result()
+
+            # Parallelize the second loop
+            if default_metrics_enabled:
+                futures = [executor.submit(process_metric, metric_info) for metric_info in data]
+                for future in as_completed(futures):
+                    future.result()
+
+            print("Waiting %d seconds" % time_between_polls)
+            sleep(time_between_polls)
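
For context, the new polling loop follows the standard concurrent.futures fan-out/fan-in pattern. The lines below are a minimal, self-contained sketch of that pattern only; fetch_metric and work_items are hypothetical stand-ins for the exporter's process_metric / process_raw_metric helpers and its data / data_raw lists, and the metric names are placeholders.

# Minimal sketch (not part of the commit) of the fan-out/fan-in polling pattern.
# fetch_metric and work_items are hypothetical placeholders.
from concurrent.futures import ThreadPoolExecutor, as_completed
from time import sleep

def fetch_metric(item):
    app_name, app_id, metric_name = item
    # A real worker would call the ScoutAPM API here and update a Prometheus Gauge.
    return app_name, metric_name

# Placeholder work items; the app name/id echo the example in the diff comment.
work_items = [("MyPlex", 119, "apdex"), ("MyPlex", 119, "response_time")]

with ThreadPoolExecutor(max_workers=10) as executor:
    while True:
        # Fan out: submit one task per (app, metric) tuple.
        futures = [executor.submit(fetch_metric, item) for item in work_items]
        # Fan in: wait for every task before sleeping.
        for future in as_completed(futures):
            future.result()
        sleep(60)  # one poll cycle per minute, as in the exporter

Because future.result() re-raises any exception raised inside the worker thread, a failing task surfaces in the polling loop rather than being silently dropped by the pool.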
