Lab Test Kernel Config Sizes

From eLinux.org
Jump to: navigation, search

This page describes a test developed for use with the CELF Open Test Lab. See Open Test Lab for details.

Table Of Contents:


Description

This test measures the size impact of turning on or off certain kernel configuration options, for a single kernel. This test uses 'target' to build a baseline kernel image, and then loops building kernel images with configuration options at different settings. The test records the kernel size after each build. At the end, a table is printed showing the kernel size measured for each configuration setting, along with the size difference from the baseline kernel.

The purpose of this test is to automatically determine, for different kernel versions, architectures, and compiler versions, what the size cost of each configuration option is.

Pre-requisites

This test needs 'target' to be installed and correctly configured in order to operate.

Please see Target Program Usage Guide for instructions for installing, configuring and using that program.

Download

Here is the test:

  • config-sizes-test-0.6.py
#!/usr/bin/python
#
# config-sizes-test.py - python routine to generate size data from kernel
#
# To Do for config-sizes-test.py
# * populate option_list with all config options to test
# * get detailed size information for option/value pair
# * get diff between option/value pair and baseline
#   * use scripts/bloat-o-meter
# * sort results by size delta
#
# To Do for test framework:
# * allow sub-command output to pass through if "-v" is specified
# * create vprint routine to print status lines only in verbose mode
# * auto-detect if manual_reset is needed
# * ring a bell when manual_reset is needed
# * make a convenience routine to perform "target" commands
# * create table of summary results at end of test
#   * support text, wiki or html output format for table
#
# BUGS:
#

import os, sys
import commands
import re
import time

MAJOR_VERSION = 0
MINOR_VERSION = 6

######################################
# Define some globals for this test suite

# prefix used for test ids and result/status messages
test_suite_name="Size-test"
# directory (relative to the starting cwd) where the kernel source is placed
src_dir = "test-linux"

# boolean CONFIG_* options (listed without the CONFIG_ prefix); each is
# tested at both 'y' and 'n' - appended to option_list later in the script
yes_no_options = ["PRINTK", "BUG", "ELF_CORE", "PROC_KCORE", "AIO", "XATTR",
	"FILE_LOCKING", "DIRECTIO", "KALLSYMS", "KALLSYMS_ALL",
]

# (option name, tuple of string values to test) for non-boolean options
option_list = [("CONFIG_MAX_SWAPFILES_SHIFT", ('5','1','0')),
	("CONFIG_NR_LDISCS", ('16','1')),
	("MAX_USER_RT_PRIO", ('100', '5')),
]

######################################
# Define some convience classes and functions

def usage():
	"""Print command-line usage help for this script to stdout."""
	prog = os.path.basename(sys.argv[0])
	print("""Usage: %s [-h] <target>

where <target> is the name of the target to be used with
the 'target' command.

-h	show this usage help
-V	show version
""" % prog)

class test_run_class:
	"""Track one run of a test suite and accumulate its results.

	The current test id is "<suite_name>-<id>", updated via set_id()
	before each individual test.  Results are appended to results_list
	as (rtype, message, extra_data) tuples by the result helpers.
	"""
	def __init__(self, suite_name):
		self.suite_name = suite_name
		self.set_id('001')
		self.results_list = []

	def set_id(self, id):
		# compose the full test id from the suite name and the sub-id
		self.id = "%s-%s" % (self.suite_name, id)

	def show_results(self, format='text'):
		# FIXTHIS - rtype is just thrown away right now
		# FIXTHIS - should vary output depending on format arg
		for (rtype, result, extra_data) in self.results_list:
			print(result)

# maybe these should be test_run_class methods???
# (but it makes actual test code more verbose)
def _log_result(test_run, rtype, label, msg, extra_data):
	"""Format, print, flush and record one result line for test_run.

	rtype is the machine-readable type stored in results_list;
	label is the human-readable tag shown in the output line.
	(Shared implementation for result_out/success/failure, which
	previously duplicated this logic three times.)
	"""
	out_msg = "[TEST: %s] %s - %s" % (test_run.id, label, msg)
	print(out_msg)
	sys.stdout.flush()
	test_run.results_list.append((rtype, out_msg, extra_data))

def result_out(test_run, msg, extra_data=''):
	"""Record a neutral informational result for test_run."""
	_log_result(test_run, "result", "Result", msg, extra_data)

def success(test_run, msg, extra_data=''):
	"""Record a success result for test_run."""
	_log_result(test_run, "success", "Success", msg, extra_data)

def failure(test_run, msg, extra_data=''):
	"""Record a failure result for test_run."""
	_log_result(test_run, "failure", "Failure", msg, extra_data)

def do_command(cmd, exception_on_error=0):
	"""Run a shell command (echoing it first) and return its exit status.

	Thin wrapper around do_command_result(), whose body this function
	previously duplicated verbatim: the command is echoed, run via the
	shell, and on failure the error and its output are printed (and
	ValueError is raised when exception_on_error is true).  Only the
	status code is returned, for callers that don't need the output.
	"""
	(rcode, result) = do_command_result(cmd, exception_on_error)
	return rcode

def do_command_result(cmd, exception_on_error=0):
	"""Run a shell command and return a (status, output) tuple.

	The command is echoed before execution.  On a non-zero status the
	error and the command output are printed; if exception_on_error is
	true, ValueError is raised as well.
	"""
	print("  Executing '%s'" % cmd)
	sys.stdout.flush()
	(rcode, result) = commands.getstatusoutput(cmd)
	if rcode != 0:
		err_str = 'Error running cmd "%s"' % cmd
		print(err_str)
		print('command output=%s' % result)
		sys.stdout.flush()
		if exception_on_error:
			raise ValueError(err_str)
	return (rcode, result)

# unused right now - may use later to get value for manual_reset
def get_target_value(target, var_name):
	"""Query the 'target' tool for one info variable and return its value.

	Raises ValueError if the 'target info' command fails.
	"""
	cmd = "target %s info -n %s" % (target, var_name)
	(rcode, result) = commands.getstatusoutput(cmd)
	if rcode != 0:
		raise ValueError('Error running cmd "%s"' % cmd)
	return result
	

#################################################
# Parse the command line

# Require at least one argument (option flags are handled below, so
# "script -h" still works because it makes len(sys.argv) == 2).
if len(sys.argv)<2:
	print "Error: missing target to run test on."
	usage()
	sys.exit(1)

if '-h' in sys.argv:
	usage()
	sys.exit(1)

if '-V' in sys.argv:
	print "size-test.py Version %d.%d" % (MAJOR_VERSION, MINOR_VERSION)
	sys.exit(1)

# first positional argument is the name of the target board
target = sys.argv[1]

# verify that target is supported by 'target'
(rcode, result) = commands.getstatusoutput('target list -q')
if rcode:
	print "Error: Problem running 'target list'"
	sys.exit(1)

# 'target list -q' prints one target name per line
tlist = result.split("\n");
if target not in tlist:
	print "Error: target '%s' not supported on this host." % target
	print "Available targets are:"
	for t in tlist:
		print "   %s" % t
	print
	usage()
	sys.exit(1)


#################################################
# Here is the actual testing

print "Running tests on target: %s" % target

# test run prep
# 1. get kernel
# 2. get default config
print "Doing test preparation for %s tests..." % test_suite_name

# fetch the kernel source for this target into src_dir
# (second arg 1 = raise ValueError if the command fails)
rcode = do_command("target %s get_kernel -o %s" % (target, src_dir), 1)
#print "*** Skipping get_kernel"

# per-target build directory; later commands reference it as ../build/<target>
rcode = do_command("install -d build/%s" % target, 1)

# all remaining commands run from inside the kernel source tree
os.chdir(src_dir)

rcode = do_command("target %s get_config" % target, 1)

# make test_run instance
test_run = test_run_class(test_suite_name)

#  build baseline kernel
rcode = do_command("target %s kbuild" % target)
#print "*** Skipping kbuild"
if rcode:
	failure(test_run, "Could not build kernel")

# get size data for baseline kernel
(rcode,result) = do_command_result("size ../build/%s/vmlinux" % target)
if rcode:
	failure(test_run, "Could not get baseline kernel size")
else:
	result_out(test_run, "baseline kernel size:\n" + result)

# preserve the baseline artifacts so each per-option build can be
# compared (config diff and size-delta) against them later
rcode = do_command("mv ../build/%s/vmlinux ../build/%s/vmlinux.baseline" % (target, target))
rcode = do_command("mv ../build/%s/.config ../build/%s/config.baseline" % (target, target))
rcode = do_command("mv ../build/%s/System.map ../build/%s/System.map.baseline" % (target, target))

# build option list
# expand each boolean option name into CONFIG_<name> tested at 'y' and 'n'
for option in yes_no_options:
	option_list.append(("CONFIG_"+option, ('y','n')))

#############################################
# CONFIG_SIZE-001

def test_option(option, value):
	"""Build a kernel with one config option forced to one value, and
	record its size and its size-delta versus the baseline kernel.

	Uses the module globals 'target' and 'test_run'.  The build is
	skipped when the requested setting ends up identical to the
	baseline config (e.g. "make oldconfig" reverted it), and a failure
	is recorded when the option cannot be set or the kernel does not
	build.  Results are reported via result_out()/failure().
	"""
	test_run.set_id("%s=%s" % (option, value))
	print("Running test %s..." % test_run.id)

	# start from the target's default config, force CONFIG_EMBEDDED on
	# (presumably so size-related options become selectable - confirm),
	# then set the option under test
	rcode = do_command("target %s get_config" % target, 1)
	rcode = do_command("target %s set_config CONFIG_EMBEDDED=y" % target)
	rcode = do_command("target %s set_config %s=%s" % (target, option, value))

	# re-run oldconfig so dependent options get resolved; 'yes ""'
	# accepts the default answer for any new prompts.
	# (was 'echo rm env-temp' - a debugging leftover that only printed
	# the command and leaked the env-temp file on every run)
	# NOTE(review): 'source' requires /bin/sh to be bash-compatible
	rcode = do_command('target %s setenv -o >env-temp ; source env-temp ; rm env-temp ; yes "" | make oldconfig' % target)

	# save off .config for test script debugging
	rcode = do_command("cp ../build/%s/.config ../build/%s/config.%s-%s" % (target, target, option, value))

	# verify this config was accepted; for value 'n' the .config line is
	# "# <option> is not set", which the substring grep below matches
	if value=='n':
		option_line = "%s is not set" % option
	else:
		option_line = "%s=%s" % (option, value)
	rcode = do_command('grep "%s" ../build/%s/.config' % (option_line, target))
	print("grep rcode=%d" % rcode)
	if rcode:
		failure(test_run, "Could not set configure option %s to value %s" % (option, value))
		return

	# check if this setting matches baseline (grep exits non-zero when
	# the diff output contains no line mentioning the option)
	rcode = do_command("diff ../build/%s/config.baseline ../build/%s/.config | grep %s" % (target, target, option))
	if rcode:
		# no difference found, option must be same as in baseline, skip it
		result_out(test_run, 'Setting "%s=%s" matches baseline - skipping build' % (option, value))
		return

	# remove last kernel built, if any
	rcode = do_command("rm ../build/%s/vmlinux" % target)

	# build kernel
	rcode = do_command("target %s kbuild" % target)
	#print "*** Skipping kbuild"
	if rcode:
		failure(test_run, "Could not build kernel")
		return

	# FIXTHIS - verify that the kernel built OK
	# (using something other than rcode from do_command() )

	# get size data for kernel
	(rcode, result) = do_command_result("size ../build/%s/vmlinux" % target)
	if rcode:
		failure(test_run, "Could not get size for kernel with %s=%s" % (option, value))
	else:
		result_out(test_run, "kernel size with %s=%s:\n" % (option, value) + result)
	(rcode, result) = do_command_result("size-delta ../build/%s/vmlinux.baseline ../build/%s/vmlinux" % (target, target))
	if rcode:
		failure(test_run, "Could not get size-delta for kernel with %s=%s" % (option, value))
	else:
		result_out(test_run, "kernel size-delta (versus baseline) with %s=%s:\n" % (option, value) + result)


	# install kernel
#	if not rcode:
#		rcode = do_command("target %s kinstall" % target, 1)
#		if rcode:
#			failure(test_run, "Could not install kernel")
#
#	# reset target
#	if not rcode:
#		if manual_reset:
#			print "*** Manual reset required - please reset the board and hit <enter>"
#			sys.stdin.readline()
#		else:
#			rcode = do_command("target %s reset" % target)
#		print "Sleeping %d seconds to wait for board to reset" % reset_sleep
#		time.sleep(reset_sleep)
#
#	# FIXTHIS - verify that the running kernel is the one just built
#
#	# get used mem on machine
#	if not rcode:
#		cmd = 'target %s run "free -t"' % target
#		print "  Executing '%s'" % cmd
#		(rcode, result) = commands.getstatusoutput(cmd)
#		if rcode:
#			print "Error collecting results from dmesg"
#			print "result=", result
#
#
	# save off kernel and map for later analysis (e.g. bloat-o-meter)
	rcode = do_command("cp ../build/%s/vmlinux ../build/%s/vmlinux.%s-%s" % (target, target, option, value))
	rcode = do_command("cp ../build/%s/System.map ../build/%s/System.map.%s-%s" % (target, target, option, value))

# DEBUGGING - use temporary option_list for testing
#option_list = [("CONFIG_BUG",("y","n")), ("CONFIG_FOO", ("y", "n"))]

# run every (option, value) combination through test_option()
for (option, values) in option_list:
	for value in values:
		test_option(option, value)

# print the results accumulated over all the runs
print "\n###########################################"
print "Results summary:"
test_run.show_results()


To Do

Things to work on:

  • should summarize results in an html or wiki table
  • need to expand the list of options to test
  • need to validate that kernel built successfully after each build
  • should also run the kernel, and get a measurement of dynamic memory usage
    • need to record if kernel boots or not with adjusted config
  • should also run with alternate base configs (allnoconfig or allyesconfig)
  • need to have a way to compare results with results on other platforms.
    • need to consolidate results tables (e.g. CONFIG_PRINTK=n on ARM, PPC, i386, etc.)
  • need to handle option dependencies

Notes

[put miscellaneous notes here]