Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 19 additions & 4 deletions benchmarking/frameworks/framework_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
import random
import re
import shutil
from copy import deepcopy

from bridge.file_storage.upload_files.file_uploader import FileUploader
from data_converters.data_converters import getConverters
Expand Down Expand Up @@ -547,16 +548,30 @@ def _runCommands(
main_command,
)
profiling_enabled = False
profiling_args = {}
if "profiler" in test:
profiling_enabled = test.get("profiler", {}).get("enabled", False)
profiling_enabled = test["profiler"].get("enabled", False)
if profiling_enabled:
platform_args["profiler_args"] = test.get("profiler", {})
# test[] is potentially raw user input, so we need to ensure
# all fields are populated so we don't have to check elsewhere
profiling_args = deepcopy(test["profiler"])
default_profiler = (
"perfetto"
if "cpu" not in profiling_args.get("types", ["cpu"])
else "simpleperf"
)
profiler = profiling_args.setdefault("profiler", default_profiler)
default_type = "memory" if profiler == "perfetto" else "cpu"
profiling_args.setdefault("types", [default_type])
profiling_args.setdefault("options", {})
platform_args["model_name"] = getModelName(model)
for idx, cmd in enumerate(cmds):
# note that we only enable profiling for the last command
# of the main commands.
platform_args["enable_profiling"] = (
profiling_enabled and main_command and idx == len(cmds) - 1
platform_args["profiling_args"] = (
profiling_args
if (profiling_enabled and main_command and idx == len(cmds) - 1)
else {"enabled": False}
)
one_output = self.runOnPlatform(
total_num, cmd, platform, platform_args, converter
Expand Down
116 changes: 87 additions & 29 deletions benchmarking/platforms/android/android_platform.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

from degrade.degrade_base import DegradeBase, getDegrade
from platforms.platform_base import PlatformBase
from profilers.perfetto.perfetto import Perfetto
from profilers.profilers import getProfilerByUsage
from six import string_types
from utils.custom_logger import getLogger
Expand Down Expand Up @@ -71,7 +72,7 @@ def _setLogCatSize(self):
# We know this command may fail. Avoid propogating this
# failure to the upstream
success = getRunStatus()
ret = self.util.run(["logcat", "-G", str(size) + "K"], timeout=2, retry=1)
ret = self.util.logcat("-G", str(size) + "K")
setRunStatus(success, overwrite=True)
if len(ret) > 0 and ret[0].find("failed to") >= 0:
repeat = True
Expand Down Expand Up @@ -213,7 +214,6 @@ def runBinaryBenchmark(self, cmd, *args, **kwargs):
"log_to_screen_only" in kwargs and kwargs["log_to_screen_only"]
)
platform_args = {}
meta = {}
if "platform_args" in kwargs:
platform_args = kwargs["platform_args"]
if "taskset" in platform_args:
Expand All @@ -236,38 +236,96 @@ def runBinaryBenchmark(self, cmd, *args, **kwargs):
)
platform_args["non_blocking"] = True
del platform_args["power"]
if platform_args.get("enable_profiling", False):
# attempt to run with profiling, else fallback to standard run
try:
simpleperf = getProfilerByUsage(
"android",
None,
platform=self,
model_name=platform_args.get("model_name", None),
cmd=cmd,
)
if simpleperf:
f = simpleperf.start()
output, meta = f.result()
if not output or not meta:
raise RuntimeError(
"No data returned from Simpleperf profiler."
)
log_logcat = []
if not log_to_screen_only:
log_logcat = self.util.logcat("-d")
return output + log_logcat, meta
# if this has not succeeded for some reason reset run status and run without profiling.
except Exception:
getLogger().critical(
f"An error has occurred when running Simpleperf profiler on device {self.platform} {self.platform_hash}.",
exc_info=True,
enable_profiling = platform_args.get("profiling_args", {}).get(
"enabled", False
)
if enable_profiling:
profiler = platform_args["profiling_args"]["profiler"]
profiling_types = platform_args["profiling_args"]["types"]
if profiler == "simpleperf":
assert profiling_types == [
"cpu"
], "Only cpu profiling is supported for SimplePerf"
try:
# attempt to run with cpu profiling, else fallback to standard run
return self._runBenchmarkWithSimpleperf(
cmd, log_to_screen_only, **platform_args
)
except Exception:
# if this has not succeeded for some reason reset run status and run without profiling.
getLogger().critical(
f"An error has occurred when running Simpleperf profiler on device {self.platform} {self.platform_hash}.",
exc_info=True,
)
elif profiler == "perfetto":
assert (
"cpu" not in profiling_types
), "cpu profiling is not yet implemented for Perfetto"
try:
# attempt Perfetto profiling
return self._runBenchmarkWithPerfetto(
cmd, log_to_screen_only, **platform_args
)
except Exception:
# if this has not succeeded for some reason reset run status and run without profiling.
getLogger().critical(
f"An error has occurred when running Perfetto profiler on device {self.platform} {self.platform_hash}.",
exc_info=True,
)
else:
getLogger().error(
f"Ignoring unsupported profiler setting: {profiler}: {profiling_types}.",
)

# Run without profiling
return self._runBinaryBenchmark(cmd, log_to_screen_only, **platform_args)

def _runBinaryBenchmark(self, cmd, log_to_screen_only: bool, **platform_args):
log_screen = self.util.shell(cmd, **platform_args)
log_logcat = []
if not log_to_screen_only:
log_logcat = self.util.logcat("-d")
return log_screen + log_logcat, meta
return log_screen + log_logcat, {}

def _runBenchmarkWithSimpleperf(
    self, cmd, log_to_screen_only: bool, **platform_args
):
    """Run the benchmark under the Simpleperf CPU profiler.

    Args:
        cmd: shell command to profile on the device.
        log_to_screen_only: when True, skip collecting the device logcat.
        **platform_args: platform arguments; ``model_name`` is read if present.

    Returns:
        Tuple of (profiler output + logcat lines, profiling meta dict).

    Raises:
        RuntimeError: when the profiler cannot be created or returns no data.
            The caller wraps this method in try/except and falls back to a
            non-profiled run, so raising (rather than implicitly returning
            None, as the original code did when ``getProfilerByUsage``
            returned a falsy value) keeps the fallback path working.
    """
    simpleperf = getProfilerByUsage(
        "android",
        None,
        platform=self,
        model_name=platform_args.get("model_name", None),
        cmd=cmd,
    )
    if not simpleperf:
        # Previously fell through and implicitly returned None, which the
        # caller cannot unpack into (output, meta); raise so the caller's
        # except-fallback re-runs the benchmark without profiling.
        raise RuntimeError("Could not create Simpleperf profiler.")
    f = simpleperf.start()
    output, meta = f.result()
    if not output or not meta:
        raise RuntimeError("No data returned from Simpleperf profiler.")
    log_logcat = []
    if not log_to_screen_only:
        # "-d" dumps the current logcat buffer and exits.
        log_logcat = self.util.logcat("-d")
    return output + log_logcat, meta

def _runBenchmarkWithPerfetto(self, cmd, log_to_screen_only: bool, **platform_args):
    """Run the benchmark under the Perfetto profiler (rooted devices only).

    Args:
        cmd: shell command to profile on the device.
        log_to_screen_only: when True, skip collecting the device logcat.
        **platform_args: must contain ``profiling_args`` with ``types`` and
            ``options`` entries (populated upstream with defaults).

    Returns:
        Tuple of (screen output + logcat lines, Perfetto results meta dict).

    Raises:
        RuntimeError: when the device is not rooted; the caller catches this
            and falls back to a non-profiled run.
    """
    if not self.util.isRootedDevice(silent=True):
        # Bug fix: the original message lacked the f-prefix, so
        # "{self.util.device}" was emitted literally instead of the device id.
        raise RuntimeError(
            f"Attempted to perform Perfetto profiling on unrooted device {self.util.device}."
        )

    with Perfetto(
        platform=self,
        types=platform_args["profiling_args"]["types"],
        options=platform_args["profiling_args"]["options"],
    ) as perfetto:
        getLogger().info("Invoked with Perfetto.")
        log_screen = self.util.shell(cmd, **platform_args)
        log_logcat = []
        if not log_to_screen_only:
            # "-d" dumps the current logcat buffer and exits.
            log_logcat = self.util.logcat("-d")
        # Collect trace results while the Perfetto context is still active.
        meta = perfetto.getResults()
        return log_screen + log_logcat, meta

def collectMetaData(self, info):
meta = super(AndroidPlatform, self).collectMetaData(info)
Expand Down
Empty file.
Loading