add lcov support
author: Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
Tue, 03 Oct 2006 10:20:55 +0200
changeset 116 d4ee28e845f3
parent 115 5435512761ef
child 117 a14f1fadc719
add lcov support
BUILD
build.py
utils/lcov/genhtml
utils/lcov/geninfo
utils/lcov/lcov
--- a/BUILD	Tue Oct 03 08:17:05 2006 +0200
+++ b/BUILD	Tue Oct 03 10:20:55 2006 +0200
@@ -62,6 +62,9 @@
   and opt-static
 Example: scons all
 
+- gcov: code coverage analysis. Build a debugging version of
+  the code for code coverage analysis in 'build-dir/gcov'.
+
 - dist: generate a release tarball and zipfile from the 
   source tree. The tarball and zipfile name are generated
   according to the version number stored in the SConstruct
--- a/build.py	Tue Oct 03 08:17:05 2006 +0200
+++ b/build.py	Tue Oct 03 10:20:55 2006 +0200
@@ -347,6 +347,19 @@
 		builders = self.gen_mod_dep (variant)
 		for builder in builders:
 			gcov_env.Alias ('gcov', builder)
+		gcov_env.Alias ('lcov-report')
+		if 'lcov-report' in COMMAND_LINE_TARGETS:
+			lcov_report_dir = os.path.join (self.build_dir, 'lcov-report')
+			create_dir_command = "rm -rf " + lcov_report_dir +
+			" && mkdir " + lcov_report_dir + ";"
+			gcov_env.Execute (create_dir_command)
+			info_file = os.path.join (lcov_report_dir, 'ns3.info')
+			lcov_command = "utils/lcov/lcov -c -d . -o " info_file
+			gcov_env.Execute (lcov_command)
+			genhtml_command = "utils/lcov/genhtml -o " + lcov_report_data +
+			" " + info_file 
+			gcov_env.Execute (genhtml_command)
+
 
 
 		opt_env = env.Copy ()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/utils/lcov/genhtml	Tue Oct 03 10:20:55 2006 +0200
@@ -0,0 +1,3475 @@
+#!/usr/bin/perl -w
+#
+#   Copyright (c) International Business Machines  Corp., 2002
+#
+#   This program is free software;  you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or (at
+#   your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY;  without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details. 
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program;  if not, write to the Free Software
+#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# genhtml
+#
+#   This script generates HTML output from .info files as created by the
+#   geninfo script. Call it with --help and refer to the genhtml man page
+#   to get information on usage and available options.
+#
+#
+# History:
+#   2002-08-23 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#                         IBM Lab Boeblingen
+#        based on code by Manoj Iyer <manjo@mail.utexas.edu> and
+#                         Megan Bock <mbock@us.ibm.com>
+#                         IBM Austin
+#   2002-08-27 / Peter Oberparleiter: implemented frame view
+#   2002-08-29 / Peter Oberparleiter: implemented test description filtering
+#                so that by default only descriptions for test cases which
+#                actually hit some source lines are kept
+#   2002-09-05 / Peter Oberparleiter: implemented --no-sourceview
+#   2002-09-05 / Mike Kobler: One of my source file paths includes a "+" in
+#                the directory name.  I found that genhtml.pl died when it
+#                encountered it. I was able to fix the problem by modifying
+#                the string with the escape character before parsing it.
+#   2002-10-26 / Peter Oberparleiter: implemented --num-spaces
+#   2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
+#                when trying to combine .info files containing data without
+#                a test name
+#   2003-04-10 / Peter Oberparleiter: extended fix by Mike to also cover
+#                other special characters
+#   2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
+#   2003-07-10 / Peter Oberparleiter: added line checksum support
+#   2004-08-09 / Peter Oberparleiter: added configuration file support
+#
+
+use strict;
+use File::Basename; 
+use Getopt::Long;
+use Digest::MD5 qw(md5_base64);
+
+
+# Global constants
+our $title		= "LTP GCOV extension - code coverage report";
+our $lcov_version	= "LTP GCOV extension version 1.4";
+our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";
+
+# Specify coverage rate limits (in %) for classifying file entries
+# HI:   $hi_limit <= rate <= 100          graph color: green
+# MED: $med_limit <= rate <  $hi_limit    graph color: orange
+# LO:          0  <= rate <  $med_limit   graph color: red
+our $hi_limit	= 50;
+our $med_limit	= 15;
+
+# Width of overview image
+our $overview_width = 80;
+
+# Resolution of overview navigation: this number specifies the maximum
+# difference in lines between the position a user selected from the overview
+# and the position the source code window is scrolled to.
+our $nav_resolution = 4;
+
+# Clicking a line in the overview image should show the source code view at
+# a position a bit further up so that the requested line is not the first
+# line in the window. This number specifies that offset in lines.
+our $nav_offset = 10;
+
+our $overview_title = "directory";
+
+# Data related prototypes
+sub print_usage(*);
+sub gen_html();
+sub process_dir($);
+sub process_file($$$);
+sub info(@);
+sub read_info_file($);
+sub get_info_entry($);
+sub set_info_entry($$$$$;$$);
+sub get_prefix(@);
+sub shorten_prefix($);
+sub get_dir_list(@);
+sub get_relative_base_path($);
+sub read_testfile($);
+sub get_date_string();
+sub split_filename($);
+sub create_sub_dir($);
+sub subtract_counts($$);
+sub add_counts($$);
+sub apply_baseline($$);
+sub remove_unused_descriptions();
+sub get_found_and_hit($);
+sub get_affecting_tests($);
+sub combine_info_files($$);
+sub merge_checksums($$$);
+sub combine_info_entries($$$);
+sub apply_prefix($$);
+sub system_no_output($@);
+sub read_config($);
+sub apply_config($);
+
+
+# HTML related prototypes
+sub escape_html($);
+sub get_bar_graph_code($$$);
+
+sub write_png_files();
+sub write_css_file();
+sub write_description_file($$$);
+
+sub write_html(*$);
+sub write_html_prolog(*$$);
+sub write_html_epilog(*$;$);
+
+sub write_header(*$$$$$);
+sub write_header_prolog(*$);
+sub write_header_line(*$$;$$);
+sub write_header_epilog(*$);
+
+sub write_file_table(*$$$$);
+sub write_file_table_prolog(*$$);
+sub write_file_table_entry(*$$$$);
+sub write_file_table_detail_heading(*$$);
+sub write_file_table_detail_entry(*$$$);
+sub write_file_table_epilog(*);
+
+sub write_test_table_prolog(*$);
+sub write_test_table_entry(*$$);
+sub write_test_table_epilog(*);
+
+sub write_source($$$$$);
+sub write_source_prolog(*);
+sub write_source_line(*$$$$);
+sub write_source_epilog(*);
+
+sub write_frameset(*$$$);
+sub write_overview_line(*$$$);
+sub write_overview(*$$$$);
+
+# External prototype (defined in genpng)
+sub gen_png($$$@);
+
+
+# Global variables & initialization
+our %info_data;		# Hash containing all data from .info file
+our $dir_prefix;	# Prefix to remove from all sub directories
+our %test_description;	# Hash containing test descriptions if available
+our $date = get_date_string();
+
+our @info_filenames;	# List of .info files to use as data source
+our $test_title;	# Title for output as written to each page header
+our $output_directory;	# Name of directory in which to store output
+our $base_filename;	# Optional name of file containing baseline data
+our $desc_filename;	# Name of file containing test descriptions
+our $css_filename;	# Optional name of external stylesheet file to use
+our $quiet;		# If set, suppress information messages
+our $help;		# Help option flag
+our $version;		# Version option flag
+our $show_details;	# If set, generate detailed directory view
+our $no_prefix;		# If set, do not remove filename prefix
+our $frames;		# If set, use frames for source code view
+our $keep_descriptions;	# If set, do not remove unused test case descriptions
+our $no_sourceview;	# If set, do not create a source code view for each file
+our $highlight;		# If set, highlight lines covered by converted data only
+our $tab_size = 8;	# Number of spaces to use in place of tab
+our $config;		# Configuration file contents
+
+our $cwd = `pwd`;	# Current working directory
+chomp($cwd);
+our $tool_dir = dirname($0);	# Directory where genhtml tool is installed
+
+
+#
+# Code entry point
+#
+
+# Add current working directory if $tool_dir is not already an absolute path
+if (! ($tool_dir =~ /^\/(.*)$/))
+{
+	$tool_dir = "$cwd/$tool_dir";
+}
+
+# Read configuration file if available
+if (-r $ENV{"HOME"}."/.lcovrc")
+{
+	$config = read_config($ENV{"HOME"}."/.lcovrc");
+}
+elsif (-r "/etc/lcovrc")
+{
+	$config = read_config("/etc/lcovrc");
+}
+
+if ($config)
+{
+	# Copy configuration file values to variables
+	apply_config({
+		"genhtml_css_file"		=> \$css_filename,
+		"genhtml_hi_limit"		=> \$hi_limit,
+		"genhtml_med_limit"		=> \$med_limit,
+		"genhtml_overview_width"	=> \$overview_width,
+		"genhtml_nav_resolution"	=> \$nav_resolution,
+		"genhtml_nav_offset"		=> \$nav_offset,
+		"genhtml_keep_descriptions"	=> \$keep_descriptions,
+		"genhtml_no_prefix"		=> \$no_prefix,
+		"genhtml_no_source"		=> \$no_sourceview,
+		"genhtml_num_spaces"		=> \$tab_size,
+		"genhtml_highlight"		=> \$highlight});
+}
+
+# Parse command line options
+if (!GetOptions("output-directory=s"	=> \$output_directory,
+		"title=s"		=> \$test_title,
+		"description-file=s"	=> \$desc_filename,
+		"keep-descriptions"	=> \$keep_descriptions,
+		"css-file=s"		=> \$css_filename,
+		"baseline-file=s"	=> \$base_filename,
+		"prefix=s"		=> \$dir_prefix,
+		"num-spaces=i"		=> \$tab_size,
+		"no-prefix"		=> \$no_prefix,
+		"no-sourceview"		=> \$no_sourceview,
+		"show-details"		=> \$show_details,
+		"frames"		=> \$frames,
+		"highlight"		=> \$highlight,
+		"quiet"			=> \$quiet,
+		"help|h"		=> \$help,
+		"version"		=> \$version
+		))
+{
+	print_usage(*STDERR);
+	exit(1);
+}
+
+@info_filenames = @ARGV;
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print($lcov_version."\n");
+	exit(0);
+}
+
+# Check for info filename
+if (!@info_filenames)
+{
+	print(STDERR "No filename specified\n");
+	print_usage(*STDERR);
+	exit(1);
+}
+
+# Generate a title if none is specified
+if (!$test_title)
+{
+	if (scalar(@info_filenames) == 1)
+	{
+		# Only one filename specified, use it as title
+		$test_title = basename($info_filenames[0]);
+	}
+	else
+	{
+		# More than one filename specified, used default title
+		$test_title = "unnamed";
+	}
+}
+
+# Make sure css_filename is an absolute path (in case we're changing
+# directories)
+if ($css_filename)
+{
+	if (!($css_filename =~ /^\/(.*)$/))
+	{
+		$css_filename = $cwd."/".$css_filename;
+	}
+}
+
+# Make sure tab_size is within valid range
+if ($tab_size < 1)
+{
+	print(STDERR "ERROR: invalid number of spaces specified: ".
+		     "$tab_size!\n");
+	exit(1);
+}
+
+# Issue a warning if --no-sourceview is enabled together with --frames
+if ($no_sourceview && $frames)
+{
+	warn("WARNING: option --frames disabled because --no-sourceview ".
+	     "was specified!\n");
+	$frames = undef;
+}
+
+if ($frames)
+{
+	# Include genpng code needed for overview image generation
+	do("$tool_dir/genpng");
+}
+
+# Make sure output_directory exists, create it if necessary
+if ($output_directory)
+{
+	stat($output_directory);
+
+	if (! -e _)
+	{
+		system("mkdir", "-p", $output_directory)
+			and die("ERROR: cannot create directory ".
+				"$output_directory!\n");
+	}
+}
+
+
+# Do something
+gen_html();
+
+exit(0);
+
+
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
+sub print_usage(*)
+{
+	local *HANDLE = $_[0];
+	my $executable_name = basename($0);
+
+	print(HANDLE <<END_OF_USAGE);
+Usage: $executable_name [OPTIONS] INFOFILE(S)
+
+Create HTML output for coverage data found in INFOFILE. Note that INFOFILE
+may also be a list of filenames.
+
+  -h, --help                        Print this help, then exit
+  -v, --version                     Print version number, then exit
+  -q, --quiet                       Do not print progress messages
+  -s, --show-details                Generate detailed directory view
+  -f, --frames                      Use HTML frames for source code view
+  -b, --baseline-file BASEFILE      Use BASEFILE as baseline file
+  -o, --output-directory OUTDIR     Write HTML output to OUTDIR
+  -t, --title TITLE                 Display TITLE in header of all pages
+  -d, --description-file DESCFILE   Read test case descriptions from DESCFILE
+  -k, --keep-descriptions           Do not remove unused test descriptions
+  -c, --css-file CSSFILE            Use external style sheet file CSSFILE
+  -p, --prefix PREFIX               Remove PREFIX from all directory names
+      --no-prefix                   Do not remove prefix from directory names
+      --no-source                   Do not create source code view
+      --num-spaces NUM              Replace tabs with NUM spaces in source view
+      --highlight                   Highlight lines with converted-only data
+
+See $lcov_url for more information about this tool.
+END_OF_USAGE
+	;
+}
+
+
+#
+# gen_html()
+#
+# Generate a set of HTML pages from contents of .info file INFO_FILENAME.
+# Files will be written to the current directory. If provided, test case
+# descriptions will be read from .tests file TEST_FILENAME and included
+# in ouput.
+#
+# Die on error.
+#
+
+sub gen_html()
+{
+	local *HTML_HANDLE;
+	my %overview;
+	my %base_data;
+	my $lines_found;
+	my $lines_hit;
+	my $overall_found = 0;
+	my $overall_hit = 0;
+	my $dir_name;
+	my $link_name;
+	my @dir_list;
+	my %new_info;
+
+	# Read in all specified .info files
+	foreach (@info_filenames)
+	{
+		info("Reading data file $_\n");
+		%new_info = %{read_info_file($_)};
+
+		# Combine %new_info with %info_data
+		%info_data = %{combine_info_files(\%info_data, \%new_info)};
+	}
+
+	info("Found %d entries.\n", scalar(keys(%info_data)));
+
+	# Read and apply baseline data if specified
+	if ($base_filename)
+	{
+		# Read baseline file
+		info("Reading baseline file $base_filename\n");
+		%base_data = %{read_info_file($base_filename)};
+		info("Found %d entries.\n", scalar(keys(%base_data)));
+
+		# Apply baseline
+		info("Subtracting baseline data.\n");
+		%info_data = %{apply_baseline(\%info_data, \%base_data)};
+	}
+
+	@dir_list = get_dir_list(keys(%info_data));
+
+	if ($no_prefix)
+	{
+		# User requested that we leave filenames alone
+		info("User asked not to remove filename prefix\n");
+	}
+	elsif (!defined($dir_prefix))
+	{
+		# Get prefix common to most directories in list
+		$dir_prefix = get_prefix(@dir_list);
+
+		if ($dir_prefix)
+		{
+			info("Found common filename prefix \"$dir_prefix\"\n");
+		}
+		else
+		{
+			info("No common filename prefix found!\n");
+			$no_prefix=1;
+		}
+	}
+	else
+	{
+		info("Using user-specified filename prefix \"".
+		     "$dir_prefix\"\n");
+	}
+
+	# Read in test description file if specified
+	if ($desc_filename)
+	{
+		info("Reading test description file $desc_filename\n");
+		%test_description = %{read_testfile($desc_filename)};
+
+		# Remove test descriptions which are not referenced
+		# from %info_data if user didn't tell us otherwise
+		if (!$keep_descriptions)
+		{
+			remove_unused_descriptions();
+		}
+	}
+
+	# Change to output directory if specified
+	if ($output_directory)
+	{
+		chdir($output_directory)
+			or die("ERROR: cannot change to directory ".
+			"$output_directory!\n");
+	}
+
+	info("Writing .css and .png files.\n");
+	write_css_file();
+	write_png_files();
+
+	info("Generating output.\n");
+
+	# Process each subdirectory and collect overview information
+	foreach $dir_name (@dir_list)
+	{
+		($lines_found, $lines_hit) = process_dir($dir_name);
+
+		# Remove prefix if applicable
+		if (!$no_prefix && $dir_prefix)
+		{
+			# Match directory names beginning with $dir_prefix
+			$dir_name = apply_prefix($dir_name, $dir_prefix);
+		}
+
+		# Generate name for directory overview HTML page
+		if ($dir_name =~ /^\/(.*)$/)
+		{
+			$link_name = substr($dir_name, 1)."/index.html";
+		}
+		else
+		{
+			$link_name = $dir_name."/index.html";
+		}
+
+		$overview{$dir_name} = "$lines_found,$lines_hit,$link_name";
+		$overall_found	+= $lines_found;
+		$overall_hit	+= $lines_hit;
+	}
+
+	# Generate overview page
+	info("Writing directory view page.\n");
+	open(*HTML_HANDLE, ">index.html")
+		or die("ERROR: cannot open index.html for writing!\n");
+	write_html_prolog(*HTML_HANDLE, "", "LCOV - $test_title");
+	write_header(*HTML_HANDLE, 0, "", "", $overall_found, $overall_hit);
+	write_file_table(*HTML_HANDLE, "", \%overview, {}, 0);
+	write_html_epilog(*HTML_HANDLE, "");
+	close(*HTML_HANDLE);
+
+	# Check if there are any test case descriptions to write out
+	if (%test_description)
+	{
+		info("Writing test case description file.\n");
+		write_description_file( \%test_description,
+					$overall_found, $overall_hit);
+	}
+
+	if ($overall_found == 0)
+	{
+		info("Warning: No lines found!\n");
+	}
+	else
+	{
+		info("Overall coverage rate: %d of %d lines (%.1f%%)\n",
+		     $overall_hit, $overall_found,
+		     $overall_hit*100/$overall_found);
+	}
+
+	chdir($cwd);
+}
+
+
+#
+# process_dir(dir_name)
+#
+
+sub process_dir($)
+{
+	my $abs_dir = $_[0];
+	my $trunc_dir;
+	my $rel_dir = $abs_dir;
+	my $base_dir;
+	my $filename;
+	my %overview;
+	my $lines_found;
+	my $lines_hit;
+	my $overall_found=0;
+	my $overall_hit=0;
+	my $base_name;
+	my $extension;
+	my $testdata;
+	my %testhash;
+	local *HTML_HANDLE;
+
+	# Remove prefix if applicable
+	if (!$no_prefix)
+	{
+		# Match directory name beginning with $dir_prefix
+		$rel_dir = apply_prefix($rel_dir, $dir_prefix);
+	}
+
+	$trunc_dir = $rel_dir;
+
+	# Remove leading /
+	if ($rel_dir =~ /^\/(.*)$/)
+	{
+		$rel_dir = substr($rel_dir, 1);
+	}
+
+	$base_dir = get_relative_base_path($rel_dir);
+
+	create_sub_dir($rel_dir);
+
+	# Match filenames which specify files in this directory, not including
+	# sub-directories
+	foreach $filename (grep(/^\Q$abs_dir\E\/[^\/]*$/,keys(%info_data)))
+	{
+		($lines_found, $lines_hit, $testdata) =
+			process_file($trunc_dir, $rel_dir, $filename);
+
+		$base_name = basename($filename);
+
+		if ($no_sourceview)
+		{
+			# User asked as not to create source code view, do not
+			# provide a page link
+			$overview{$base_name} =
+				"$lines_found,$lines_hit";
+		}
+		elsif ($frames)
+		{
+			# Link to frameset page
+			$overview{$base_name} =
+				"$lines_found,$lines_hit,".
+				"$base_name.gcov.frameset.html";
+		}
+		else
+		{
+			# Link directory to source code view page
+			$overview{$base_name} =
+				"$lines_found,$lines_hit,".
+				"$base_name.gcov.html";
+		}
+
+		$testhash{$base_name} = $testdata;
+
+		$overall_found	+= $lines_found;
+		$overall_hit	+= $lines_hit;
+	}
+
+	# Generate directory overview page (without details)
+	open(*HTML_HANDLE, ">$rel_dir/index.html")
+		or die("ERROR: cannot open $rel_dir/index.html ".
+		       "for writing!\n");
+	write_html_prolog(*HTML_HANDLE, $base_dir,
+			  "LCOV - $test_title - $trunc_dir");
+	write_header(*HTML_HANDLE, 1, $trunc_dir, $rel_dir, $overall_found,
+		     $overall_hit);
+	write_file_table(*HTML_HANDLE, $base_dir, \%overview, {}, 1);
+	write_html_epilog(*HTML_HANDLE, $base_dir);
+	close(*HTML_HANDLE);
+
+	if ($show_details)
+	{
+		# Generate directory overview page including details
+		open(*HTML_HANDLE, ">$rel_dir/index-detail.html")
+			or die("ERROR: cannot open $rel_dir/".
+			       "index-detail.html for writing!\n");
+		write_html_prolog(*HTML_HANDLE, $base_dir,
+				  "LCOV - $test_title - $trunc_dir");
+		write_header(*HTML_HANDLE, 1, $trunc_dir, $rel_dir,
+			     $overall_found,
+			     $overall_hit);
+		write_file_table(*HTML_HANDLE, $base_dir, \%overview,
+				 \%testhash, 1);
+		write_html_epilog(*HTML_HANDLE, $base_dir);
+		close(*HTML_HANDLE);
+	}
+
+	# Calculate resulting line counts
+	return ($overall_found, $overall_hit);
+}
+
+
+#
+# get_converted_lines(testdata)
+#
+# Return hash of line numbers of those lines which were only covered in
+# converted data sets.
+#
+
+sub get_converted_lines($)
+{
+	my $testdata = $_[0];
+	my $testcount;
+	my %converted;
+	my %nonconverted;
+	my $hash;
+	my $testcase;
+	my $line;
+	my %result;
+
+
+	# Get a hash containing line numbers with positive counts both for
+	# converted and original data sets
+	foreach $testcase (keys(%{$testdata}))
+	{
+		# Check to see if this is a converted data set
+		if ($testcase =~ /,diff$/)
+		{
+			$hash = \%converted;
+		}
+		else
+		{
+			$hash = \%nonconverted;
+		}
+
+		$testcount = $testdata->{$testcase};
+		# Add lines with a positive count to hash
+		foreach $line (keys%{$testcount})
+		{
+			if ($testcount->{$line} > 0)
+			{
+				$hash->{$line} = 1;
+			}
+		}
+	}
+
+	# Combine both hashes to resulting list
+	foreach $line (keys(%converted))
+	{
+		if (!defined($nonconverted{$line}))
+		{
+			$result{$line} = 1;
+		}
+	}
+
+	return \%result;
+}
+
+
+#
+# process_file(trunc_dir, rel_dir, filename)
+#
+
+sub process_file($$$)
+{
+	info("Processing file ".apply_prefix($_[2], $dir_prefix)."\n");
+
+	my $trunc_dir = $_[0];
+	my $rel_dir = $_[1];
+	my $filename = $_[2];
+	my $base_name = basename($filename);
+	my $base_dir = get_relative_base_path($rel_dir);
+	my $testdata;
+	my $testcount;
+	my $sumcount;
+	my $funcdata;
+	my $checkdata;
+	my $lines_found;
+	my $lines_hit;
+	my $converted;
+	my @source;
+	my $pagetitle;
+	local *HTML_HANDLE;
+
+	($testdata, $sumcount, $funcdata, $checkdata, $lines_found,
+		$lines_hit) = get_info_entry($info_data{$filename});
+
+	# Return after this point in case user asked us not to generate
+	# source code view
+	if ($no_sourceview)
+	{
+		return ($lines_found, $lines_hit, $testdata);
+	}
+
+	$converted = get_converted_lines($testdata);
+	# Generate source code view for this file
+	open(*HTML_HANDLE, ">$rel_dir/$base_name.gcov.html")
+		or die("ERROR: cannot open $rel_dir/$base_name.gcov.html ".
+		       "for writing!\n");
+	$pagetitle = "LCOV - $test_title - $trunc_dir/$base_name";
+	write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle);
+	write_header(*HTML_HANDLE, 2, "$trunc_dir/$base_name",
+		     "$rel_dir/$base_name", $lines_found, $lines_hit);
+	@source = write_source(*HTML_HANDLE, $filename, $sumcount, $checkdata,
+			       $converted);
+
+	write_html_epilog(*HTML_HANDLE, $base_dir, 1);
+	close(*HTML_HANDLE);
+
+	# Additional files are needed in case of frame output
+	if (!$frames)
+	{
+		return ($lines_found, $lines_hit, $testdata);
+	}
+
+	# Create overview png file
+	gen_png("$rel_dir/$base_name.gcov.png", $overview_width, $tab_size,
+		@source);
+
+	# Create frameset page
+	open(*HTML_HANDLE, ">$rel_dir/$base_name.gcov.frameset.html")
+		or die("ERROR: cannot open ".
+		       "$rel_dir/$base_name.gcov.frameset.html".
+		       " for writing!\n");
+	write_frameset(*HTML_HANDLE, $base_dir, $base_name, $pagetitle);
+	close(*HTML_HANDLE);
+
+	# Write overview frame
+	open(*HTML_HANDLE, ">$rel_dir/$base_name.gcov.overview.html")
+		or die("ERROR: cannot open ".
+		       "$rel_dir/$base_name.gcov.overview.html".
+		       " for writing!\n");
+	write_overview(*HTML_HANDLE, $base_dir, $base_name, $pagetitle,
+		       scalar(@source));
+	close(*HTML_HANDLE);
+
+	return ($lines_found, $lines_hit, $testdata);
+}
+
+
+#
+# read_info_file(info_filename)
+#
+# Read in the contents of the .info file specified by INFO_FILENAME. Data will
+# be returned as a reference to a hash containing the following mappings:
+#
+# %result: for each filename found in file -> \%data
+#
+# %data: "test"  -> \%testdata
+#        "sum"   -> \%sumcount
+#        "func"  -> \%funcdata
+#        "found" -> $lines_found (number of instrumented lines found in file)
+#	 "hit"   -> $lines_hit (number of executed lines in file)
+#        "check" -> \%checkdata
+#
+# %testdata: name of test affecting this file -> \%testcount
+#
+# %testcount: line number -> execution count for a single test
+# %sumcount : line number -> execution count for all tests
+# %funcdata : line number -> name of function beginning at that line
+# %checkdata: line number -> checksum of source code line
+# 
+# Note that .info file sections referring to the same file and test name
+# will automatically be combined by adding all execution counts.
+#
+# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
+# is compressed using GZIP. If available, GUNZIP will be used to decompress
+# this file.
+#
+# Die on error.
+#
+
+sub read_info_file($)
+{
+	my $tracefile = $_[0];		# Name of tracefile
+	my %result;			# Resulting hash: file -> data
+	my $data;			# Data handle for current entry
+	my $testdata;			#       "             "
+	my $testcount;			#       "             "
+	my $sumcount;			#       "             "
+	my $funcdata;			#       "             "
+	my $checkdata;			#       "             "
+	my $line;			# Current line read from .info file
+	my $testname;			# Current test name
+	my $filename;			# Current filename
+	my $hitcount;			# Count for lines hit
+	my $count;			# Execution count of current line
+	my $negative;			# If set, warn about negative counts
+	my $checksum;			# Checksum of current line
+	local *INFO_HANDLE;		# Filehandle for .info file
+
+	# Check if file exists and is readable
+	stat($_[0]);
+	if (!(-r _))
+	{
+		die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	# Check if this is really a plain file
+	if (!(-f _))
+	{
+		die("ERROR: not a plain file: $_[0]!\n");
+	}
+
+	# Check for .gz extension
+	if ($_[0] =~ /\.gz$/)
+	{
+		# Check for availability of GZIP tool
+		system_no_output(1, "gunzip", "-h")
+			and die("ERROR: gunzip command not available!\n");
+
+		# Check integrity of compressed file
+		system_no_output(1, "gunzip", "-t", $_[0])
+			and die("ERROR: integrity check failed for ".
+				"compressed file $_[0]!\n");
+
+		# Open compressed file
+		open(INFO_HANDLE, "gunzip -c $_[0]|")
+			or die("ERROR: cannot start gunzip to uncompress ".
+			       "file $_[0]!\n");
+	}
+	else
+	{
+		# Open uncompressed file
+		open(INFO_HANDLE, $_[0])
+			or die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	$testname = "";
+	while (<INFO_HANDLE>)
+	{
+		chomp($_);
+		$line = $_;
+
+		# Switch statement
+		foreach ($line)
+		{
+			/^TN:(\w*(,\w+)?)/ && do
+			{
+				# Test name information found
+				$testname = defined($1) ? $1 : "";
+				last;
+			};
+
+			/^[SK]F:(.*)/ && do
+			{
+				# Filename information found
+				# Retrieve data for new entry
+				$filename = $1;
+
+				$data = $result{$filename};
+				($testdata, $sumcount, $funcdata, $checkdata) =
+					get_info_entry($data);
+
+				if (defined($testname))
+				{
+					$testcount = $testdata->{$testname};
+				}
+				else
+				{
+					my %new_hash;
+					$testcount = \%new_hash;
+				}
+				last;
+			};
+
+			/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
+			{
+				# Fix negative counts
+				$count = $2 < 0 ? 0 : $2;
+				if ($2 < 0)
+				{
+					$negative = 1;
+				}
+				# Execution count found, add to structure
+				# Add summary counts
+				$sumcount->{$1} += $count;
+
+				# Add test-specific counts
+				if (defined($testname))
+				{
+					$testcount->{$1} += $count;
+				}
+
+				# Store line checksum if available
+				if (defined($3))
+				{
+					$checksum = substr($3, 1);
+
+					# Does it match a previous definition
+					if (defined($checkdata->{$1}) &&
+					    ($checkdata->{$1} ne $checksum))
+					{
+						die("ERROR: checksum mismatch ".
+						    "at $filename:$1\n");
+					}
+
+					$checkdata->{$1} = $checksum;
+				}
+				last;
+			};
+
+			/^FN:(\d+),([^,]+)/ && do
+			{
+				# Function data found, add to structure
+				$funcdata->{$1} = $2;
+				last;
+			};
+
+			/^end_of_record/ && do
+			{
+				# Found end of section marker
+				if ($filename)
+				{
+					# Store current section data
+					if (defined($testname))
+					{
+						$testdata->{$testname} =
+							$testcount;
+					}
+					set_info_entry($data, $testdata,
+						       $sumcount, $funcdata,
+						       $checkdata);
+					$result{$filename} = $data;
+				}
+
+			};
+
+			# default
+			last;
+		}
+	}
+	close(INFO_HANDLE);
+
+	# Calculate lines_found and lines_hit for each file
+	foreach $filename (keys(%result))
+	{
+		$data = $result{$filename};
+
+		($testdata, $sumcount, $funcdata) = get_info_entry($data);
+
+		$data->{"found"} = scalar(keys(%{$sumcount}));
+		$hitcount = 0;
+
+		foreach (keys(%{$sumcount}))
+		{
+			if ($sumcount->{$_} > 0) { $hitcount++; }
+		}
+
+		$data->{"hit"} = $hitcount;
+
+		$result{$filename} = $data;
+	}
+
+	if (scalar(keys(%result)) == 0)
+	{
+		die("ERROR: no valid records found in tracefile $tracefile\n");
+	}
+	if ($negative)
+	{
+		warn("WARNING: negative counts found in tracefile ".
+		     "$tracefile\n");
+	}
+
+	return(\%result);
+}
+
+
+#
+# get_info_entry(hash_ref)
+#
+# Retrieve data from an entry of the structure generated by read_info_file().
+# Return a list of references to hashes:
+# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
+#  ref, lines found, lines hit)
+#
+
+sub get_info_entry($)
+{
+	my $testdata_ref = $_[0]->{"test"};
+	my $sumcount_ref = $_[0]->{"sum"};
+	my $funcdata_ref = $_[0]->{"func"};
+	my $checkdata_ref = $_[0]->{"check"};
+	my $lines_found = $_[0]->{"found"};
+	my $lines_hit = $_[0]->{"hit"};
+
+	return ($testdata_ref, $sumcount_ref, $funcdata_ref, $checkdata_ref,
+		$lines_found, $lines_hit);
+}
+
+
+#
+# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
+#                checkdata_ref[,lines_found, lines_hit])
+#
+# Update the hash referenced by HASH_REF with the provided data references.
+#
+
+sub set_info_entry($$$$$;$$)
+{
+	my $data_ref = $_[0];
+
+	$data_ref->{"test"} = $_[1];
+	$data_ref->{"sum"} = $_[2];
+	$data_ref->{"func"} = $_[3];
+	$data_ref->{"check"} = $_[4];
+
+	if (defined($_[5])) { $data_ref->{"found"} = $_[5]; }
+	if (defined($_[6])) { $data_ref->{"hit"} = $_[6]; }
+}
+
+
+#
+# get_prefix(filename_list)
+#
+# Search FILENAME_LIST for a directory prefix which is common to as many
+# list entries as possible, so that removing this prefix will minimize the
+# sum of the lengths of all resulting shortened filenames.
+#
+
+sub get_prefix(@)
+{
+	my @filename_list = @_;		# provided list of filenames
+	my %prefix;			# mapping: prefix -> sum of lengths
+	my $current;			# Temporary iteration variable
+
+	# Find list of prefixes
+	foreach (@filename_list)
+	{
+		# Need explicit assignment to get a copy of $_ so that
+		# shortening the contained prefix does not affect the list
+		$current = shorten_prefix($_);
+		while ($current = shorten_prefix($current))
+		{
+			# Skip rest if the remaining prefix has already been
+			# added to hash
+			if ($prefix{$current}) { last; }
+
+			# Initialize with 0
+			$prefix{$current}="0";
+		}
+
+	}
+
+	# Calculate sum of lengths for all prefixes
+	foreach $current (keys(%prefix))
+	{
+		foreach (@filename_list)
+		{
+			# Add original length
+			$prefix{$current} += length($_);
+
+			# Check whether prefix matches
+			if (substr($_, 0, length($current)) eq $current)
+			{
+				# Subtract prefix length for this filename
+				$prefix{$current} -= length($current);
+			}
+		}
+	}
+
+	# Find and return prefix with minimal sum
+	$current = (keys(%prefix))[0];
+
+	foreach (keys(%prefix))
+	{
+		if ($prefix{$_} < $prefix{$current})
+		{
+			$current = $_;
+		}
+	}
+
+	return($current);
+}
+
+
+#
+# shorten_prefix(prefix)
+#
+# Return PREFIX shortened by last directory component.
+#
+
+sub shorten_prefix($)
+{
+	my @list = split("/", $_[0]);
+
+	pop(@list);
+	return join("/", @list);
+}
+
+
+
+#
+# get_dir_list(filename_list)
+#
+# Return sorted list of directories for each entry in given FILENAME_LIST.
+#
+
+sub get_dir_list(@)
+{
+	# A hash keyed by directory name removes duplicates for free
+	my %dirs = map { shorten_prefix($_) => "" } @_;
+
+	return(sort(keys(%dirs)));
+}
+
+
+#
+# get_relative_base_path(subdirectory)
+#
+# Return a relative path string which references the base path when applied
+# in SUBDIRECTORY.
+#
+# Example: get_relative_base_path("fs/mm") -> "../../"
+#
+
+sub get_relative_base_path($)
+{
+	# Make an empty directory path a special case
+	if (!$_[0]) { return(""); }
+
+	# Copy the argument before counting: elements of @_ alias the
+	# caller's values, and the previous code applied s/// directly to
+	# $_[0], which dies ("Modification of a read-only value attempted")
+	# when called with a string literal such as "fs/mm".
+	my $path = $_[0];
+
+	# One "../" per path component, i.e. number of slashes plus one
+	my $slashes = ($path =~ tr{/}{});
+
+	return("../" x ($slashes + 1));
+}
+
+
+#
+# read_testfile(test_filename)
+#
+# Read in file TEST_FILENAME which contains test descriptions in the format:
+#
+#   TN:<whitespace><test name>
+#   TD:<whitespace><test description>
+#
+# for each test case. Return a reference to a hash containing a mapping
+#
+#   test name -> test description.
+#
+# Die on error.
+#
+
+sub read_testfile($)
+{
+	my %result;		# test name -> accumulated description text
+	my $test_name;		# name taken from the most recent TN: line
+	local *TEST_HANDLE;
+
+	open(TEST_HANDLE, "<".$_[0])
+		or die("ERROR: cannot open $_[0]!\n");
+
+	while (<TEST_HANDLE>)
+	{
+		chomp($_);
+
+		# Match lines beginning with TN:<whitespace(s)>
+		if (/^TN:\s+(.*?)\s*$/)
+		{
+			# Store name for later use
+			$test_name = $1;
+		}
+
+		# Match lines beginning with TD:<whitespace(s)>
+		# NOTE(review): a TD: line appearing before any TN: line
+		# would use an undefined $test_name here — assumes
+		# well-formed input; verify against callers
+		if (/^TD:\s+(.*?)\s*$/)
+		{
+			# Check for empty line
+			if ($1)
+			{
+				# Add description to hash
+				$result{$test_name} .= " $1";
+			}
+			else
+			{
+				# Add empty line
+				$result{$test_name} .= "\n\n";
+			}
+		}
+	}
+
+	close(TEST_HANDLE);
+
+	return \%result;
+}
+
+
+#
+# escape_html(STRING)
+#
+# Return a copy of STRING in which all occurrences of HTML special characters
+# are escaped.
+#
+
+sub escape_html($)
+{
+	my $string = $_[0];
+
+	# Only bail out on undefined input; the previous truthiness check
+	# (!$string) also discarded the valid string "0". An empty string
+	# passes through the substitutions unchanged and is returned as "".
+	if (!defined($string)) { return ""; }
+
+	$string =~ s/&/&amp;/g;		# & -> &amp;
+	$string =~ s/</&lt;/g;		# < -> &lt;
+	$string =~ s/>/&gt;/g;		# > -> &gt;
+	$string =~ s/\"/&quot;/g;	# " -> &quot;
+
+	# Expand tabs to spaces, padding each to the next multiple of the
+	# configured $tab_size (declared elsewhere in this file)
+	while ($string =~ /^([^\t]*)(\t)/)
+	{
+		my $replacement = " "x($tab_size - (length($1) % $tab_size));
+		$string =~ s/^([^\t]*)(\t)/$1$replacement/;
+	}
+
+	$string =~ s/\n/<br>/g;		# \n -> <br>
+
+	return $string;
+}
+
+
+#
+# get_date_string()
+#
+# Return the current date in the form: yyyy-mm-dd
+#
+
+sub get_date_string()
+{
+	# localtime() list slices: [3] = day of month, [4] = month
+	# (0-based), [5] = years since 1900
+	my ($day, $month, $year) = (localtime())[3, 4, 5];
+
+	return sprintf("%d-%02d-%02d", $year + 1900, $month + 1, $day);
+}
+
+
+#
+# create_sub_dir(dir_name)
+#
+# Create subdirectory DIR_NAME if it does not already exist, including all its
+# parent directories.
+#
+# Die on error.
+#
+
+sub create_sub_dir($)
+{
+	# "mkdir -p" creates the directory including missing parents;
+	# system() returns non-zero on failure, hence "and die".
+	# The error message previously interpolated $_ instead of the
+	# actual directory argument $_[0].
+	system("mkdir", "-p", $_[0])
+		and die("ERROR: cannot create directory $_[0]!\n");
+}
+
+
+#
+# write_description_file(descriptions, overall_found, overall_hit)
+#
+# Write HTML file containing all test case descriptions. DESCRIPTIONS is a
+# reference to a hash containing a mapping
+#
+#   test case name -> test case description
+#
+# Die on error.
+#
+
+sub write_description_file($$$)
+{
+	my %description = %{$_[0]};	# test name -> description text
+	my $found = $_[1];		# overall lines found (for header)
+	my $hit = $_[2];		# overall lines hit (for header)
+	my $test_name;
+	local *HTML_HANDLE;
+
+	# Page is always written to the current working directory
+	open(HTML_HANDLE, ">descriptions.html")
+		or die("ERROR: cannot open descriptions.html for writing!\n")
+
+	write_html_prolog(*HTML_HANDLE, "", "LCOV - test case descriptions");
+	write_header(*HTML_HANDLE, 3, "", "", $found, $hit);
+
+	write_test_table_prolog(*HTML_HANDLE,
+			 "Test case descriptions - alphabetical list");
+
+	# One table entry per test case, sorted by name
+	foreach $test_name (sort(keys(%description)))
+	{
+		write_test_table_entry(*HTML_HANDLE, $test_name,
+				       escape_html($description{$test_name}));
+	}
+
+	write_test_table_epilog(*HTML_HANDLE);
+	write_html_epilog(*HTML_HANDLE, "");
+
+	close(HTML_HANDLE);
+}
+
+
+
+#
+# write_png_files()
+#
+# Create all necessary .png files for the HTML-output in the current
+# directory. .png-files are used as bar graphs.
+#
+# Die on error.
+#
+
+sub write_png_files()
+{
+	my %data;		# filename -> list of image bytes
+	local *PNG_HANDLE;
+
+	# Each entry below is the complete byte stream of a tiny PNG image
+	# (the data starts with the PNG signature 0x89 "PNG"); the images
+	# are stretched via width/height attributes to form bar graphs.
+	$data{"ruby.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 
+		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x18, 0x10, 0x5d, 0x57, 
+		 0x34, 0x6e, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 
+		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 
+		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0x35, 0x2f, 
+		 0x00, 0x00, 0x00, 0xd0, 0x33, 0x9a, 0x9d, 0x00, 0x00, 0x00, 
+		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, 
+		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, 
+		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 
+		 0x82];
+	$data{"amber.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 
+		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x28, 0x04, 0x98, 0xcb, 
+		 0xd6, 0xe0, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 
+		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 
+		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xe0, 0x50, 
+		 0x00, 0x00, 0x00, 0xa2, 0x7a, 0xda, 0x7e, 0x00, 0x00, 0x00, 
+		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, 
+	  	 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, 
+  		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 
+  		 0x82];
+	$data{"emerald.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 
+		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x22, 0x2b, 0xc9, 0xf5, 
+		 0x03, 0x33, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 
+		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 
+		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0x1b, 0xea, 0x59, 
+		 0x0a, 0x0a, 0x0a, 0x0f, 0xba, 0x50, 0x83, 0x00, 0x00, 0x00, 
+		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, 
+		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, 
+		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 
+		 0x82];
+	$data{"snow.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d, 
+		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x1e, 0x1d, 0x75, 0xbc, 
+		 0xef, 0x55, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73, 
+		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2, 
+		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff, 
+		 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00, 
+		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00, 
+		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00, 
+		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 
+		 0x82];
+	$data{"glass.png"} =
+		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 
+		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 
+		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 
+		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d, 
+		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00, 
+		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff, 
+		 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00, 
+		 0x01, 0x74, 0x52, 0x4e, 0x53, 0x00, 0x40, 0xe6, 0xd8, 0x66, 
+		 0x00, 0x00, 0x00, 0x01, 0x62, 0x4b, 0x47, 0x44, 0x00, 0x88, 
+		 0x05, 0x1d, 0x48, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 
+		 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 
+		 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 
+		 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x13, 0x0f, 0x08, 0x19, 0xc4, 
+		 0x40, 0x56, 0x10, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41, 
+		 0x54, 0x78, 0x9c, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 
+		 0x01, 0x48, 0xaf, 0xa4, 0x71, 0x00, 0x00, 0x00, 0x00, 0x49, 
+		 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82];
+
+	# Write each image into the current directory in binary mode,
+	# converting the byte values back to characters with chr()
+	foreach (keys(%data))
+	{
+		open(PNG_HANDLE, ">".$_)
+			or die("ERROR: cannot create $_!\n");
+		binmode(PNG_HANDLE);
+		print(PNG_HANDLE map(chr,@{$data{$_}}));
+		close(PNG_HANDLE);
+	}
+}
+
+
+#
+# write_css_file()
+#
+# Write the cascading style sheet file gcov.css to the current directory.
+# This file defines basic layout attributes of all generated HTML pages.
+#
+
+sub write_css_file()
+{
+	local *CSS_HANDLE;
+
+	# Check for a specified external style sheet file
+	if ($css_filename)
+	{
+		# Simply copy that file
+		system("cp", $css_filename, "gcov.css")
+			and die("ERROR: cannot copy file $css_filename!\n");
+		return;
+	}
+
+	open(CSS_HANDLE, ">gcov.css")
+		or die ("ERROR: cannot open gcov.css for writing!\n");
+
+
+	# *************************************************************
+
+	# Default style sheet, kept inline as a heredoc.
+	# NOTE(review): the parallel assignment to $_ appears redundant —
+	# $_ is not used afterwards in this sub.
+	my $css_data = ($_=<<"END_OF_CSS")
+	/* All views: initial background and text color */
+	body
+	{
+	  color:            #000000;
+	  background-color: #FFFFFF;
+	}
+
+
+	/* All views: standard link format*/
+	a:link
+	{
+	  color:           #284FA8;
+	  text-decoration: underline;
+	}
+
+
+	/* All views: standard link - visited format */
+	a:visited
+	{
+	  color:           #00CB40;
+	  text-decoration: underline;
+	}
+
+
+	/* All views: standard link - activated format */
+	a:active
+	{
+	  color:           #FF0040;
+	  text-decoration: underline;
+	}
+
+
+	/* All views: main title format */
+	td.title
+	{
+	  text-align:     center;
+	  padding-bottom: 10px;
+	  font-family:    sans-serif;
+	  font-size:      20pt;
+	  font-style:     italic;
+	  font-weight:    bold;
+	}
+
+
+	/* All views: header item format */
+	td.headerItem
+	{
+	  text-align:    right;
+	  padding-right: 6px;
+	  font-family:   sans-serif;
+	  font-weight:   bold;
+	}
+
+
+	/* All views: header item value format */
+	td.headerValue
+	{
+	  text-align:  left;
+	  color:       #284FA8;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	}
+
+
+	/* All views: color of horizontal ruler */
+	td.ruler
+	{
+	  background-color: #6688D4;
+	}
+
+
+	/* All views: version string format */
+	td.versionInfo
+	{
+	  text-align:   center;
+	  padding-top:  2px;
+	  font-family:  sans-serif;
+	  font-style:   italic;
+	}
+
+
+	/* Directory view/File view (all)/Test case descriptions:
+	   table headline format */
+	td.tableHead
+	{
+	  text-align:       center;
+	  color:            #FFFFFF;
+	  background-color: #6688D4;
+	  font-family:      sans-serif;
+	  font-size:        120%;
+	  font-weight:      bold;
+	}
+
+
+	/* Directory view/File view (all): filename entry format */
+	td.coverFile
+	{
+	  text-align:       left;
+	  padding-left:     10px;
+	  padding-right:    20px; 
+	  color:            #284FA8;
+	  background-color: #DAE7FE;
+	  font-family:      monospace;
+	}
+
+
+	/* Directory view/File view (all): bar-graph entry format*/
+	td.coverBar
+	{
+	  padding-left:     10px;
+	  padding-right:    10px;
+	  background-color: #DAE7FE;
+	}
+
+
+	/* Directory view/File view (all): bar-graph outline color */
+	td.coverBarOutline
+	{
+	  background-color: #000000;
+	}
+
+
+	/* Directory view/File view (all): percentage entry for files with
+	   high coverage rate */
+	td.coverPerHi
+	{
+	  text-align:       right;
+	  padding-left:     10px;
+	  padding-right:    10px;
+	  background-color: #DAE7FE;
+	  font-weight:      bold;
+	}
+
+
+	/* Directory view/File view (all): line count entry for files with
+	   high coverage rate */
+	td.coverNumHi
+	{
+	  text-align:       right;
+	  padding-left:     10px;
+	  padding-right:    10px;
+	  background-color: #DAE7FE;
+	}
+
+
+	/* Directory view/File view (all): percentage entry for files with
+	   medium coverage rate */
+	td.coverPerMed
+	{
+	  text-align:       right;
+	  padding-left:     10px;
+	  padding-right:    10px;
+	  background-color: #FFEA20;
+	  font-weight:      bold;
+	}
+
+
+	/* Directory view/File view (all): line count entry for files with
+	   medium coverage rate */
+	td.coverNumMed
+	{
+	  text-align:       right;
+	  padding-left:     10px;
+	  padding-right:    10px;
+	  background-color: #FFEA20;
+	}
+
+
+	/* Directory view/File view (all): percentage entry for files with
+	   low coverage rate */
+	td.coverPerLo
+	{
+	  text-align:       right;
+	  padding-left:     10px;
+	  padding-right:    10px;
+	  background-color: #FF0000;
+	  font-weight:      bold;
+	}
+
+
+	/* Directory view/File view (all): line count entry for files with
+	   low coverage rate */
+	td.coverNumLo
+	{
+	  text-align:       right;
+	  padding-left:     10px;
+	  padding-right:    10px;
+	  background-color: #FF0000;
+	}
+
+
+	/* File view (all): "show/hide details" link format */
+	a.detail:link
+	{
+	  color: #B8D0FF;
+	}
+
+
+	/* File view (all): "show/hide details" link - visited format */
+	a.detail:visited
+	{
+	  color: #B8D0FF;
+	}
+
+
+	/* File view (all): "show/hide details" link - activated format */
+	a.detail:active
+	{
+	  color: #FFFFFF;
+	}
+
+
+	/* File view (detail): test name table headline format */
+	td.testNameHead
+	{
+	  text-align:       right;
+	  padding-right:    10px;
+	  background-color: #DAE7FE;
+	  font-family:      sans-serif;
+	  font-weight:      bold;
+	}
+
+
+	/* File view (detail): test lines table headline format */
+	td.testLinesHead
+	{
+	  text-align:       center;
+	  background-color: #DAE7FE;
+	  font-family:      sans-serif;
+	  font-weight:      bold;
+	}
+
+
+	/* File view (detail): test name entry */
+	td.testName
+	{
+	  text-align:       right;
+	  padding-right:    10px;
+	  background-color: #DAE7FE;
+	}
+
+
+	/* File view (detail): test percentage entry */
+	td.testPer
+	{
+	  text-align:       right;
+	  padding-left:     10px;
+	  padding-right:    10px; 
+	  background-color: #DAE7FE;
+	}
+
+
+	/* File view (detail): test lines count entry */
+	td.testNum
+	{
+	  text-align:       right;
+	  padding-left:     10px;
+	  padding-right:    10px; 
+	  background-color: #DAE7FE;
+	}
+
+
+	/* Test case descriptions: test name format*/
+	dt
+	{
+	  font-family: sans-serif;
+	  font-weight: bold;
+	}
+
+
+	/* Test case descriptions: description table body */
+	td.testDescription
+	{
+	  padding-top:      10px;
+	  padding-left:     30px;
+	  padding-bottom:   10px;
+	  padding-right:    30px;
+	  background-color: #DAE7FE;
+	}
+
+
+	/* Source code view: source code format */
+	pre.source
+	{
+	  font-family: monospace;
+	  white-space: pre;
+	}
+
+	/* Source code view: line number format */
+	span.lineNum
+	{
+	  background-color: #EFE383;
+	}
+
+
+	/* Source code view: format for lines which were executed */
+	span.lineCov
+	{
+	  background-color: #CAD7FE;
+	}
+
+
+	/* Source code view: format for lines which were not executed */
+	span.lineNoCov
+	{
+	  background-color: #FF6230;
+	}
+
+
+	/* Source code view: format for lines which were executed only in a
+	   previous version */
+	span.lineDiffCov
+	{
+	  background-color: #B5F7AF;
+	}
+END_OF_CSS
+	;
+
+	# *************************************************************
+
+
+	# Remove leading tab from all lines (the tab exists only to indent
+	# the heredoc within the Perl source)
+	$css_data =~ s/^\t//gm;
+
+	print(CSS_HANDLE $css_data);
+
+	close(CSS_HANDLE);
+}
+
+
+#
+# get_bar_graph_code(base_dir, cover_found, cover_hit)
+#
+# Return a string containing HTML code which implements a bar graph display
+# for a coverage rate of cover_hit * 100 / cover_found.
+#
+
+sub get_bar_graph_code($$$)
+{
+	my $rate;		# coverage percentage (float)
+	my $alt;		# alt text: percentage with one decimal
+	my $width;		# width in pixels of the colored bar part
+	my $remainder;		# width in pixels of the uncovered part
+	my $png_name;		# bar color image, chosen by coverage rate
+	my $graph_code;
+
+	# Check number of instrumented lines; no graph for zero lines
+	if ($_[1] == 0) { return ""; }
+
+	$rate		= $_[2] * 100 / $_[1];
+	$alt		= sprintf("%.1f", $rate)."%";
+	$width		= sprintf("%.0f", $rate);
+	$remainder	= sprintf("%d", 100-$width);
+
+	# Decide which .png file to use; thresholds come from the global
+	# $med_limit/$hi_limit settings declared elsewhere in this file
+	if ($rate < $med_limit)		{ $png_name = "ruby.png"; }
+	elsif ($rate < $hi_limit)	{ $png_name = "amber.png"; }
+	else				{ $png_name = "emerald.png"; }
+
+	if ($width == 0)
+	{
+		# Zero coverage: single 100px "snow" (white) bar
+		$graph_code = (<<END_OF_HTML)
+	        <table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]snow.png" width=100 height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+	elsif ($width == 100)
+	{
+		# Full coverage: single 100px colored bar
+		$graph_code = (<<END_OF_HTML)
+		<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]$png_name" width=100 height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+	else
+	{
+		# Positive coverage: colored bar plus white remainder
+		$graph_code = (<<END_OF_HTML)
+		<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]$png_name" width=$width height=10 alt="$alt"><img src="$_[0]snow.png" width=$remainder height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+
+	# Remove leading tabs from all lines
+	$graph_code =~ s/^\t+//gm;
+	chomp($graph_code);
+
+	return($graph_code);
+}
+
+
+#
+# write_html(filehandle, html_code)
+#
+# Write out HTML_CODE to FILEHANDLE while removing a leading tabulator mark
+# in each line of HTML_CODE.
+#
+
+sub write_html(*$)
+{
+	my ($handle, $html_code) = @_;
+
+	# Each template line carries one leading tab that only indents the
+	# heredoc within the Perl source; strip it before writing
+	$html_code =~ s/^\t//gm;
+
+	print {$handle} $html_code
+		or die("ERROR: cannot write HTML data ($!)\n");
+}
+
+
+#
+# write_html_prolog(filehandle, base_dir, pagetitle)
+#
+# Write an HTML prolog common to all HTML files to FILEHANDLE. PAGETITLE will
+# be used as HTML page title. BASE_DIR contains a relative path which points
+# to the base directory.
+#
+
+sub write_html_prolog(*$$)
+{
+	my $pagetitle = $_[2];
+
+
+	# *************************************************************
+
+	# $_[1] is the relative base path, prepended to the style sheet
+	# reference so sub-directory pages find gcov.css
+	write_html($_[0], <<END_OF_HTML)
+	<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+
+	<html lang="en">
+
+	<head>
+	  <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
+	  <title>$pagetitle</title>
+	  <link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
+	</head>
+
+	<body>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_header_prolog(filehandle, base_dir)
+#
+# Write beginning of page header HTML code.
+#
+
+sub write_header_prolog(*$)
+{
+	# *************************************************************
+
+	# Opens the header table; write_header_epilog() closes it.
+	# $title is a file-level global.
+	write_html($_[0], <<END_OF_HTML)
+	  <table width="100%" border=0 cellspacing=0 cellpadding=0>
+	    <tr><td class="title">$title</td></tr>
+	    <tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+
+	    <tr>
+	      <td width="100%">
+	        <table cellpadding=1 border=0 width="100%">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_header_line(filehandle, item1, value1, [item2, value2])
+#
+# Write a header line, consisting of either one or two pairs of "header item"
+# and "header value".
+#
+
+sub write_header_line(*$$;$$)
+{
+	my $item1 = $_[1];
+	my $value1 = $_[2];
+
+	# Use GOTO to prevent indenting HTML with more than one tabs
+	if (scalar(@_) > 3) { goto two_items }
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	        <tr>
+	          <td class="headerItem" width="20%">$item1:</td>
+	          <td class="headerValue" width="80%" colspan=4>$value1</td>
+	        </tr>
+END_OF_HTML
+	;
+
+	return();
+
+	# *************************************************************
+
+
+two_items:
+	my $item2 = $_[3];
+	my $value2 = $_[4];
+
+
+	# *************************************************************
+
+	# This template is indented with spaces rather than a leading tab;
+	# since write_html() only strips one leading tab per line, both
+	# templates produce identically indented output.
+	write_html($_[0], <<END_OF_HTML)
+        <tr>
+          <td class="headerItem" width="20%">$item1:</td>
+          <td class="headerValue" width="20%">$value1</td>
+          <td width="20%"></td>
+          <td class="headerItem" width="20%">$item2:</td>
+          <td class="headerValue" width="20%">$value2</td>
+        </tr>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_header_epilog(filehandle, base_dir)
+#
+# Write end of page header HTML code.
+#
+
+sub write_header_epilog(*$)
+{
+	# *************************************************************
+
+	# Closes the tables opened by write_header_prolog()
+	write_html($_[0], <<END_OF_HTML)
+	        </table>
+	      </td>
+	    </tr>
+	    <tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+	  </table>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_prolog(filehandle, left_heading, right_heading)
+#
+# Write heading for file table.
+#
+
+sub write_file_table_prolog(*$$)
+{
+	# *************************************************************
+
+	# Four-column layout: filename (50%), bar graph, percentage and
+	# line counts; the right heading spans the last three columns
+	write_html($_[0], <<END_OF_HTML)
+	  <center>
+	  <table width="80%" cellpadding=2 cellspacing=1 border=0>
+
+	    <tr>
+	      <td width="50%"><br></td>
+	      <td width="15%"></td>
+	      <td width="15%"></td>
+	      <td width="20%"></td>
+	    </tr>
+
+	    <tr>
+	      <td class="tableHead">$_[1]</td>
+	      <td class="tableHead" colspan=3>$_[2]</td>
+	    </tr>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_entry(filehandle, cover_filename, cover_bar_graph,
+#                        cover_found, cover_hit)
+#
+# Write an entry of the file table.
+#
+
+sub write_file_table_entry(*$$$$)
+{
+	my $rate;
+	my $rate_string;
+	my $classification = "Lo";	# CSS class suffix: Lo/Med/Hi
+
+	# $_[3] = lines found; $_[4] = lines hit
+	if ($_[3]>0)
+	{
+		$rate = $_[4] * 100 / $_[3];
+		$rate_string = sprintf("%.1f", $rate)."&nbsp;%";
+
+		# Thresholds from the global $med_limit/$hi_limit settings
+		if ($rate < $med_limit)		{ $classification = "Lo"; }
+		elsif ($rate < $hi_limit)	{ $classification = "Med"; }
+		else				{ $classification = "Hi"; }
+	}
+	else
+	{
+		# No instrumented lines at all
+		$rate_string = "undefined";
+	}
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	    <tr>
+	      <td class="coverFile">$_[1]</td>
+	      <td class="coverBar" align="center">
+	        $_[2]
+	      </td>
+	      <td class="coverPer$classification">$rate_string</td>
+	      <td class="coverNum$classification">$_[4]&nbsp;/&nbsp;$_[3]&nbsp;lines</td>
+	    </tr>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_detail_heading(filehandle, left_heading, right_heading)
+#
+# Write heading for detail section in file table.
+#
+
+sub write_file_table_detail_heading(*$$)
+{
+	# *************************************************************
+
+	# Two headings spanning two columns each, matching the four-column
+	# layout of write_file_table_prolog()
+	write_html($_[0], <<END_OF_HTML)
+	    <tr>
+	      <td class="testNameHead" colspan=2>$_[1]</td>
+	      <td class="testLinesHead" colspan=2>$_[2]</td>
+	    </tr>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_detail_entry(filehandle, test_name, cover_found, cover_hit)
+#
+# Write entry for detail section in file table.
+#
+
+sub write_file_table_detail_entry(*$$$)
+{
+	my $rate;
+	my $name = $_[1];
+	
+	# $_[2] = lines found; $_[3] = lines hit
+	if ($_[2]>0)
+	{
+		$rate = sprintf("%.1f", $_[3]*100/$_[2])."&nbsp;%";
+	}
+	else
+	{
+		$rate = "undefined";
+	}
+
+	# Test names ending in ",diff" denote converted baseline data
+	if ($name =~ /^(.*),diff$/)
+	{
+		$name = $1." (converted)";
+	}
+
+	# Display a placeholder for data without a test name
+	if ($name eq "")
+	{
+		$name = "<span style=\"font-style:italic\">&lt;unnamed&gt;</span>";
+	}
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	    <tr>
+	      <td class="testName" colspan=2>$name</td>
+	      <td class="testPer">$rate</td>
+	      <td class="testNum">$_[3]&nbsp;/&nbsp;$_[2]&nbsp;lines</td>
+	    </tr>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_epilog(filehandle)
+#
+# Write end of file table HTML code.
+#
+
+sub write_file_table_epilog(*)
+{
+	# *************************************************************
+
+	# Closes the table opened by write_file_table_prolog()
+	write_html($_[0], <<END_OF_HTML)
+	  </table>
+	  </center>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_prolog(filehandle, table_heading)
+#
+# Write heading for test case description table.
+#
+
+sub write_test_table_prolog(*$)
+{
+	# *************************************************************
+
+	# Opens table and definition list; entries are added by
+	# write_test_table_entry(), closed by write_test_table_epilog()
+	write_html($_[0], <<END_OF_HTML)
+	  <center>
+	  <table width="80%" cellpadding=2 cellspacing=1 border=0>
+
+	    <tr>
+	      <td><br></td>
+	    </tr>
+
+	    <tr>
+	      <td class="tableHead">$_[1]</td>
+	    </tr>
+
+	    <tr>
+	      <td class="testDescription">
+	        <dl>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_entry(filehandle, test_name, test_description)
+#
+# Write entry for the test table.
+#
+
+sub write_test_table_entry(*$$)
+{
+	# *************************************************************
+
+	# The <a name=...> anchor allows linking directly to a test case
+	write_html($_[0], <<END_OF_HTML)
+          <dt>$_[1]<a name="$_[1]">&nbsp;</a></dt>
+          <dd>$_[2]<br><br></dd>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_epilog(filehandle)
+#
+# Write end of test description table HTML code.
+#
+
+sub write_test_table_epilog(*)
+{
+	# *************************************************************
+
+	# Closes the list and table opened by write_test_table_prolog()
+	write_html($_[0], <<END_OF_HTML)
+	        </dl>
+	      </td>
+	    </tr>
+	  </table>
+	  </center>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_source_prolog(filehandle)
+#
+# Write start of source code table.
+#
+
+sub write_source_prolog(*)
+{
+	# *************************************************************
+
+	# Opens the <pre> block that write_source_line() fills and
+	# write_source_epilog() closes
+	write_html($_[0], <<END_OF_HTML)
+	  <table cellpadding=0 cellspacing=0 border=0>
+	    <tr>
+	      <td><br></td>
+	    </tr>
+	    <tr>
+	      <td><pre class="source">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_source_line(filehandle, line_num, source, hit_count, converted)
+#
+# Write formatted source code line. Return a line in a format as needed
+# by gen_png()
+#
+
+sub write_source_line(*$$$$)
+{
+	my $source_format;	# opening <span> tag or "" for plain lines
+	my $count;		# execution count column, 15 chars wide
+	my $result;		# gen_png()-format line to return
+	my $anchor_start = "";
+	my $anchor_end = "";
+
+	# $_[3] is the hit count: undef = not instrumented
+	if (!(defined$_[3]))
+	{
+		$result		= "";
+		$source_format	= "";
+		$count		= " "x15;
+	}
+	# Instrumented but never executed
+	elsif ($_[3] == 0)
+	{
+		$result		= $_[3];
+		$source_format	= '<span class="lineNoCov">';
+		$count		= sprintf("%15d", $_[3]);
+	}
+	# Converted (baseline) data with highlighting enabled; the "*"
+	# marks the count as converted in the returned gen_png() line
+	elsif ($_[4] && defined($highlight))
+	{
+		$result		= "*".$_[3];
+		$source_format	= '<span class="lineDiffCov">';
+		$count		= sprintf("%15d", $_[3]);
+	}
+	# Executed at least once
+	else
+	{
+		$result		= $_[3];
+		$source_format	= '<span class="lineCov">';
+		$count		= sprintf("%15d", $_[3]);
+	}
+
+	$result .= ":".$_[2];
+
+	# Write out a line number navigation anchor every $nav_resolution
+	# lines if necessary
+	if ($frames && (($_[1] - 1) % $nav_resolution == 0))
+	{
+		$anchor_start	= "<a name=\"$_[1]\">";
+		$anchor_end	= "</a>";
+	}
+
+
+	# *************************************************************
+
+	write_html($_[0],
+		   $anchor_start.
+		   '<span class="lineNum">'.sprintf("%8d", $_[1]).
+		   " </span>$source_format$count : ".
+		   escape_html($_[2]).($source_format?"</span>":"").
+		   $anchor_end."\n");
+
+	# *************************************************************
+
+	return($result);
+}
+
+
+#
+# write_source_epilog(filehandle)
+#
+# Write end of source code table.
+#
+
+sub write_source_epilog(*)
+{
+	# *************************************************************
+
+	# Closes the <pre> block opened by write_source_prolog()
+	write_html($_[0], <<END_OF_HTML)
+	</pre>
+	      </td>
+	    </tr>
+	  </table>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_html_epilog(filehandle, base_dir[, break_frames])
+#
+# Write HTML page footer to FILEHANDLE. BREAK_FRAMES should be set when
+# this page is embedded in a frameset, clicking the URL link will then
+# break this frameset.
+#
+
+sub write_html_epilog(*$;$)
+{
+	my $break_code = "";
+
+	# When embedded in a frameset, make the generator link replace the
+	# whole frameset rather than just this frame
+	if (defined($_[2]))
+	{
+		$break_code = " target=\"_parent\"";
+	}
+
+	# *************************************************************
+
+	# $lcov_url and $lcov_version are file-level globals
+	write_html($_[0], <<END_OF_HTML)
+	  <table width="100%" border=0 cellspacing=0 cellpadding=0>
+	  <tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+	  <tr><td class="versionInfo">Generated by: <a href="$lcov_url"$break_code>$lcov_version</a></td></tr>
+	  </table>
+	  <br>
+
+	</body>
+	</html>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_frameset(filehandle, basedir, basename, pagetitle)
+#
+#
+
+sub write_frameset(*$$$)
+{
+	# Left frame holds the overview image plus some padding
+	my $frame_width = $overview_width + 40;
+
+	# *************************************************************
+
+	# $_[2] is the source file base name; the two frames show its
+	# overview page and its annotated source page
+	write_html($_[0], <<END_OF_HTML)
+	<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN">
+
+	<html lang="en">
+
+	<head>
+	  <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
+	  <title>$_[3]</title>
+	  <link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
+	</head>
+
+	<frameset cols="$frame_width,*">
+	  <frame src="$_[2].gcov.overview.html" name="overview">
+	  <frame src="$_[2].gcov.html" name="source">
+	  <noframes>
+	    <center>Frames not supported by your browser!<br></center>
+	  </noframes>
+	</frameset>
+
+	</html>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# sub write_overview_line(filehandle, basename, line, link)
+#
+#
+
+sub write_overview_line(*$$$)
+{
+	# Rectangle covering $nav_resolution source lines in the overview
+	# image; clicking it jumps to anchor $_[3] in the source frame
+	my $y1 = $_[2] - 1;
+	my $y2 = $y1 + $nav_resolution - 1;
+	my $x2 = $overview_width - 1;
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	    <area shape="rect" coords="0,$y1,$x2,$y2" href="$_[1].gcov.html#$_[3]" target="source" alt="overview">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_overview(filehandle, basedir, basename, pagetitle, lines)
+#
+#
+
+sub write_overview(*$$$$)
+{
+	my $index;
+	my $max_line = $_[4] - 1;	# $_[4] is the total line count
+	my $offset;
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+
+	<html lang="en">
+
+	<head>
+	  <title>$_[3]</title>
+	  <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
+	  <link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
+	</head>
+
+	<body>
+	  <map name="overview">
+END_OF_HTML
+	;
+
+	# *************************************************************
+
+	# Make $offset the next higher multiple of $nav_resolution
+	# (sprintf "%d" truncates the division result to an integer)
+	$offset = ($nav_offset + $nav_resolution - 1) / $nav_resolution;
+	$offset = sprintf("%d", $offset ) * $nav_resolution;
+
+	# Create image map for overview image
+	for ($index = 1; $index <= $_[4]; $index += $nav_resolution)
+	{
+		# Enforce nav_offset: the first $offset lines all link to
+		# anchor 1 so the target is not scrolled out of view
+		if ($index < $offset + 1)
+		{
+			write_overview_line($_[0], $_[2], $index, 1);
+		}
+		else
+		{
+			write_overview_line($_[0], $_[2], $index, $index - $offset);
+		}
+	}
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  </map>
+
+	  <center>
+	  <a href="$_[2].gcov.html#top" target="source">Top</a><br><br>
+	  <img src="$_[2].gcov.png" width=$overview_width height=$max_line alt="Overview" border=0 usemap="#overview">
+	  </center>
+	</body>
+	</html>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_header(filehandle, type, trunc_file_name, rel_file_name, lines_found,
+# lines_hit)
+#
+# Write a complete standard page header. TYPE may be (0, 1, 2, 3)
+# corresponding to (directory view header, file view header, source view
+# header, test case description header)
+# The coverage rate is rendered as a percentage, or "-" if no lines were found.
+
+sub write_header(*$$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	my $type = $_[1];
+	my $trunc_name = $_[2];
+	my $rel_filename = $_[3];
+	my $lines_found = $_[4];
+	my $lines_hit = $_[5];
+	my $base_dir;
+	my $view;
+	my $test;
+	my $rate;
+	my $base_name;
+
+	# Calculate coverage rate
+	if ($lines_found>0)
+	{
+		$rate = sprintf("%.1f", $lines_hit * 100 / $lines_found)." %";
+	}
+	else
+	{
+		$rate = "-";
+	}
+
+	$base_name = basename($rel_filename);
+
+	# Prepare text for "current view" field
+	if ($type == 0)
+	{
+		# Main overview
+		$base_dir = "";
+		$view = $overview_title;
+	}
+	elsif ($type == 1)
+	{
+		# Directory overview
+		$base_dir = get_relative_base_path($rel_filename);
+		$view = "<a href=\"$base_dir"."index.html\">".
+			"$overview_title</a> - $trunc_name";
+	}
+	elsif ($type == 2)
+	{
+		# File view
+		my $dir_name = dirname($rel_filename);
+
+		
+
+		$base_dir = get_relative_base_path($dir_name);
+		if ($frames)
+		{
+			# Need to break frameset when clicking any of these
+			# links
+			$view = "<a href=\"$base_dir"."index.html\" ".
+				"target=\"_parent\">$overview_title</a> - ".
+				"<a href=\"index.html\" target=\"_parent\">".
+				"$dir_name</a> - $base_name";
+		}
+		else
+		{
+			$view = "<a href=\"$base_dir"."index.html\">".
+				"$overview_title</a> - ".
+				"<a href=\"index.html\">".
+				"$dir_name</a> - $base_name";
+		}
+	}
+	elsif ($type == 3)
+	{
+		# Test description header
+		$base_dir = "";
+		$view = "<a href=\"$base_dir"."index.html\">".
+			"$overview_title</a> - test case descriptions";
+	}
+
+	# Prepare text for "test" field
+	$test = escape_html($test_title);
+
+	# Append link to test description page if available
+	if (%test_description && ($type != 3))
+	{
+		if ($frames && ($type == 2))
+		{
+			# Need to break frameset when clicking this link
+			$test .= " ( <a href=\"$base_dir".
+				 "descriptions.html\" target=\"_parent\">".
+				 "view test case descriptions</a> )";
+		}
+		else
+		{
+			$test .= " ( <a href=\"$base_dir".
+				 "descriptions.html\">".
+				 "view test case descriptions</a> )";
+		}
+	}
+
+	# Write header
+	write_header_prolog(*HTML_HANDLE, $base_dir);
+	write_header_line(*HTML_HANDLE, "Current&nbsp;view", $view);
+	write_header_line(*HTML_HANDLE, "Test", $test);
+	write_header_line(*HTML_HANDLE, "Date", $date,
+			  "Instrumented&nbsp;lines", $lines_found);
+	write_header_line(*HTML_HANDLE, "Code&nbsp;covered", $rate,
+			  "Executed&nbsp;lines", $lines_hit);
+	write_header_epilog(*HTML_HANDLE, $base_dir);
+}
+
+
+#
+# split_filename(filename)
+#
+# Return (path, filename, extension) for a given FILENAME.
+# Returns the empty list when FILENAME is empty or undefined.
+
+sub split_filename($)
+{
+	if (!$_[0]) { return(); }
+	my @path_components = split('/', $_[0]);
+	my @file_components = split('\.', pop(@path_components));
+	my $extension = pop(@file_components);
+
+	return (join("/",@path_components), join(".",@file_components),
+		$extension);
+}
+
+
+#
+# write_file_table(filehandle, base_dir, overview, testhash, fileview)
+#
+# Write a complete file table. OVERVIEW is a reference to a hash containing
+# the following mapping:
+#
+#   filename -> "lines_found,lines_hit,page_link"
+#
+# TESTHASH is a reference to the following hash:
+#
+#   filename -> \%testdata
+#   %testdata: name of test affecting this file -> \%testcount
+#   %testcount: line number -> execution count for a single test
+#
+# Heading of first column is "Filename" if FILEVIEW is true, "Directory name"
+# otherwise.
+#
+
+sub write_file_table(*$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	my $base_dir = $_[1];
+	my %overview = %{$_[2]};
+	my %testhash = %{$_[3]};
+	my $fileview = $_[4];
+	my $filename;
+	my $bar_graph;
+	my $hit;
+	my $found;
+	my $page_link;
+	my $testname;
+	my $testdata;
+	my $testcount;
+	my %affecting_tests;
+	my $coverage_heading = "Coverage";
+
+	# Provide a link to details/non-detail list if this is directory
+	# overview and we are supposed to create a detail view
+	if (($base_dir ne "") && $show_details)
+	{
+		if (%testhash)
+		{
+			# This is the detail list, provide link to standard
+			# list
+			$coverage_heading .= " ( <a class=\"detail\" href=\"".
+					     "index.html\">hide ".
+					     "details</a> )";
+		}
+		else
+		{
+			# This is the standard list, provide link to detail
+			# list
+			$coverage_heading .= " ( <a class=\"detail\" href=\"".
+					     "index-detail.html\">show ".
+					     "details</a> )";
+		}
+	}
+
+	write_file_table_prolog(*HTML_HANDLE,
+				$fileview ? "Filename" : "Directory&nbsp;name",
+				$coverage_heading);
+
+	foreach $filename (sort(keys(%overview)))
+	{
+		($found, $hit, $page_link) = split(",", $overview{$filename});
+		$bar_graph = get_bar_graph_code($base_dir, $found, $hit);
+
+		$testdata = $testhash{$filename};
+
+		# Add anchor tag in case a page link is provided
+		if ($page_link)
+		{
+			$filename = "<a href=\"$page_link\">$filename</a>";
+		}
+
+		write_file_table_entry(*HTML_HANDLE, $filename, $bar_graph,
+				       $found, $hit);
+
+		# Check whether we should write test specific coverage
+		# as well
+		if (!($show_details && $testdata)) { next; }
+
+		# Filter out those tests that actually affect this file
+		%affecting_tests = %{ get_affecting_tests($testdata) };
+
+		# Does any of the tests affect this file at all?
+		if (!%affecting_tests) { next; }
+
+		# Write test details for this entry
+		write_file_table_detail_heading(*HTML_HANDLE, "Test name",
+						"Lines hit");
+
+		foreach $testname (keys(%affecting_tests))
+		{
+			($found, $hit) =
+				split(",", $affecting_tests{$testname});
+
+			# Insert link to description if available
+			if ($test_description{$testname})
+			{
+				$testname = "<a href=\"$base_dir".
+					    "descriptions.html#$testname\">".
+					    "$testname</a>";
+			}
+
+			write_file_table_detail_entry(*HTML_HANDLE, $testname,
+				$found, $hit);
+		}
+	}
+
+	write_file_table_epilog(*HTML_HANDLE);
+}
+
+
+#
+# get_found_and_hit(hash)
+#
+# Return the count for entries (found) and entries with an execution count
+# greater than zero (hit) in a hash (linenumber -> execution count) as
+# a list (found, hit)
+#
+
+sub get_found_and_hit($)
+{
+	my %hash = %{$_[0]};
+	my $found = 0;
+	my $hit = 0;
+
+	# Count instrumented lines (found) and executed lines (hit)
+	$found = 0;
+	$hit = 0;
+			
+	foreach (keys(%hash))
+	{
+		$found++;
+		if ($hash{$_}>0) { $hit++; }
+	}
+
+	return ($found, $hit);
+}
+
+
+#
+# get_affecting_tests(hashref)
+#
+# HASHREF contains a mapping testname -> (linenumber -> exec count). Return
+# a hash containing mapping testname -> "lines found,lines hit" for each
+# testname which has a nonzero hit count.
+#
+
+sub get_affecting_tests($)
+{
+	my %hash = %{$_[0]};
+	my $testname;
+	my $testcount;
+	my %result;
+	my $found;
+	my $hit;
+
+	foreach $testname (keys(%hash))
+	{
+		# Get (line number -> count) hash for this test case
+		$testcount = $hash{$testname};
+
+		# Calculate sum
+		($found, $hit) = get_found_and_hit($testcount);
+
+		if ($hit>0)
+		{
+			$result{$testname} = "$found,$hit";
+		}
+	}
+
+	return(\%result);
+}
+
+
+#
+# write_source(filehandle, source_filename, count_data, checksum_data,
+#              converted_data)
+#
+# Write an HTML view of a source code file. Returns a list containing
+# data as needed by gen_png().
+#
+# Die on error (unreadable source file or checksum mismatch).
+#
+
+sub write_source($$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	local *SOURCE_HANDLE;
+	my $source_filename = $_[1];
+	my %count_data;
+	my $line_number;
+	my @result;
+	my $checkdata = $_[3];
+	my $converted = $_[4];
+
+	if ($_[2])
+	{
+		%count_data = %{$_[2]};
+	}
+
+	open(SOURCE_HANDLE, "<".$source_filename)
+		or die("ERROR: cannot open $source_filename for reading!\n");
+	
+	write_source_prolog(*HTML_HANDLE);
+
+	for ($line_number = 1; <SOURCE_HANDLE> ; $line_number++)
+	{
+		chomp($_);
+
+		# Source code matches coverage data?
+		if (defined($checkdata->{$line_number}) &&
+		    ($checkdata->{$line_number} ne md5_base64($_)))
+		{
+			die("ERROR: checksum mismatch  at $source_filename:".
+			    "$line_number\n");
+		}
+
+		push (@result,
+		      write_source_line(HTML_HANDLE, $line_number,
+					$_, $count_data{$line_number},
+					$converted->{$line_number}));
+	}
+
+	close(SOURCE_HANDLE);
+	write_source_epilog(*HTML_HANDLE);
+	return(@result);
+}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
+sub info(@)
+{
+	if (!$quiet)
+	{
+		# Forward all arguments to printf unchanged
+		printf(@_);
+	}
+}
+
+
+#
+# subtract_counts(data_ref, base_ref)
+# Subtract per-line counts in BASE_REF from DATA_REF, clamping at zero.
+# Return a list (result_ref, lines_found, lines_hit).
+
+sub subtract_counts($$)
+{
+	my %data = %{$_[0]};
+	my %base = %{$_[1]};
+	my $line;
+	my $data_count;
+	my $base_count;
+	my $hit = 0;
+	my $found = 0;
+
+	foreach $line (keys(%data))
+	{
+		$found++;
+		$data_count = $data{$line};
+		$base_count = $base{$line};
+
+		if (defined($base_count))
+		{
+			$data_count -= $base_count;
+
+			# Make sure we don't get negative numbers
+			if ($data_count<0) { $data_count = 0; }
+		}
+
+		$data{$line} = $data_count;
+		if ($data_count > 0) { $hit++; }
+	}
+
+	return (\%data, $found, $hit);
+}
+
+
+#
+# add_counts(data1_ref, data2_ref)
+#
+# DATA1_REF and DATA2_REF are references to hashes containing a mapping
+#
+#   line number -> execution count
+#
+# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
+# is a reference to a hash containing the combined mapping in which
+# execution counts are added.
+#
+
+sub add_counts($$)
+{
+	my %data1 = %{$_[0]};	# Hash 1
+	my %data2 = %{$_[1]};	# Hash 2
+	my %result;		# Resulting hash
+	my $line;		# Current line iteration scalar
+	my $data1_count;	# Count of line in hash1
+	my $data2_count;	# Count of line in hash2
+	my $found = 0;		# Total number of distinct lines found
+	my $hit = 0;		# Number of lines with a count > 0
+
+	foreach $line (keys(%data1))
+	{
+		$data1_count = $data1{$line};
+		$data2_count = $data2{$line};
+
+		# Add counts if present in both hashes
+		if (defined($data2_count)) { $data1_count += $data2_count; }
+
+		# Store sum in %result
+		$result{$line} = $data1_count;
+
+		$found++;
+		if ($data1_count > 0) { $hit++; }
+	}
+
+	# Add lines unique to data2
+	foreach $line (keys(%data2))
+	{
+		# Skip lines already in data1
+		if (defined($data1{$line})) { next; }
+
+		# Copy count from data2
+		$result{$line} = $data2{$line};
+
+		$found++;
+		if ($result{$line} > 0) { $hit++; }
+	}
+
+	return (\%result, $found, $hit);
+}
+
+
+#
+# apply_baseline(data_ref, baseline_ref)
+#
+# Subtract the execution counts found in the baseline hash referenced by
+# BASELINE_REF from actual data in DATA_REF.
+# Return a reference to the adjusted data hash.
+
+sub apply_baseline($$)
+{
+	my %data_hash = %{$_[0]};
+	my %base_hash = %{$_[1]};
+	my $filename;
+	my $testname;
+	my $data;
+	my $data_testdata;
+	my $data_funcdata;
+	my $data_checkdata;
+	my $data_count;
+	my $base;
+	my $base_testdata;
+	my $base_checkdata;
+	my $base_count;
+	my $sumcount;
+	my $found;
+	my $hit;
+
+	foreach $filename (keys(%data_hash))
+	{
+		# Get data set for data and baseline
+		$data = $data_hash{$filename};
+		$base = $base_hash{$filename};
+
+		# Get set entries for data and baseline
+		($data_testdata, undef, $data_funcdata, $data_checkdata) =
+			get_info_entry($data);
+		($base_testdata, $base_count, undef, $base_checkdata) =
+			get_info_entry($base);
+
+		# Check for compatible checksums
+		merge_checksums($data_checkdata, $base_checkdata, $filename);
+
+		# Sumcount has to be calculated anew
+		$sumcount = {};
+
+		# For each test case, subtract test specific counts
+		foreach $testname (keys(%{$data_testdata}))
+		{
+			# Get counts of both data and baseline
+			$data_count = $data_testdata->{$testname};
+
+			$hit = 0;
+
+			($data_count, undef, $hit) =
+				subtract_counts($data_count, $base_count);
+
+			# Check whether this test case did hit any line at all
+			if ($hit > 0)
+			{
+				# Write back resulting hash
+				$data_testdata->{$testname} = $data_count;
+			}
+			else
+			{
+				# Delete test case which did not impact this
+				# file
+				delete($data_testdata->{$testname});
+			}
+
+			# Add counts to sum of counts
+			($sumcount, $found, $hit) =
+				add_counts($sumcount, $data_count);
+		}
+
+		# Write back resulting entry
+		set_info_entry($data, $data_testdata, $sumcount,
+			       $data_funcdata, $data_checkdata, $found, $hit);
+
+		$data_hash{$filename} = $data;
+	}
+
+	return (\%data_hash);
+}
+
+
+#
+# remove_unused_descriptions()
+#
+# Removes all test descriptions from the global hash %test_description which
+# are not present in %info_data.
+# Prints a summary via info() when any descriptions were removed.
+
+sub remove_unused_descriptions()
+{
+	my $filename;		# The current filename
+	my %test_list;		# Hash containing found test names
+	my $test_data;		# Reference to hash test_name -> count_data
+	my $before;		# Initial number of descriptions
+	my $after;		# Remaining number of descriptions
+	
+	$before = scalar(keys(%test_description));
+
+	foreach $filename (keys(%info_data))
+	{
+		($test_data) = get_info_entry($info_data{$filename});
+		foreach (keys(%{$test_data}))
+		{
+			$test_list{$_} = "";
+		}
+	}
+
+	# Remove descriptions for tests which are not in our list
+	foreach (keys(%test_description))
+	{
+		if (!defined($test_list{$_}))
+		{
+			delete($test_description{$_});
+		}
+	}
+
+	$after = scalar(keys(%test_description));
+	if ($after < $before)
+	{
+		info("Removed ".($before - $after).
+		     " unused descriptions, $after remaining.\n");
+	}
+}
+
+
+#
+# merge_checksums(ref1, ref2, filename)
+#
+# REF1 and REF2 are references to hashes containing a mapping
+#
+#   line number -> checksum
+#
+# Merge checksum lists defined in REF1 and REF2 and return reference to
+# resulting hash. Die if a checksum for a line is defined in both hashes
+# but does not match.
+# Lines present in only one of the hashes are copied through unchanged.
+
+sub merge_checksums($$$)
+{
+	my $ref1 = $_[0];
+	my $ref2 = $_[1];
+	my $filename = $_[2];
+	my %result;
+	my $line;
+
+	foreach $line (keys(%{$ref1}))
+	{
+		if (defined($ref2->{$line}) &&
+		    ($ref1->{$line} ne $ref2->{$line}))
+		{
+			die("ERROR: checksum mismatch at $filename:$line\n");
+		}
+		$result{$line} = $ref1->{$line};
+	}
+
+	foreach $line (keys(%{$ref2}))
+	{
+		$result{$line} = $ref2->{$line};
+	}
+
+	return \%result;
+}
+
+
+#
+# combine_info_entries(entry_ref1, entry_ref2, filename)
+#
+# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
+# Return reference to resulting hash.
+#
+
+sub combine_info_entries($$$)
+{
+	my $entry1 = $_[0];	# Reference to hash containing first entry
+	my $testdata1;
+	my $sumcount1;
+	my $funcdata1;
+	my $checkdata1;
+
+	my $entry2 = $_[1];	# Reference to hash containing second entry
+	my $testdata2;
+	my $sumcount2;
+	my $funcdata2;
+	my $checkdata2;
+
+	my %result;		# Hash containing combined entry
+	my %result_testdata;
+	my $result_sumcount = {};
+	my %result_funcdata;
+	my $lines_found;
+	my $lines_hit;
+
+	my $testname;
+	my $filename = $_[2];
+
+	# Retrieve data
+	($testdata1, $sumcount1, $funcdata1, $checkdata1) =
+		get_info_entry($entry1);
+	($testdata2, $sumcount2, $funcdata2, $checkdata2) =
+		get_info_entry($entry2);
+
+	# Merge checksums
+	$checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);
+
+	# Combine funcdata
+	foreach (keys(%{$funcdata1}))
+	{
+		$result_funcdata{$_} = $funcdata1->{$_};
+	}
+
+	foreach (keys(%{$funcdata2}))
+	{
+		$result_funcdata{$_} = $funcdata2->{$_};
+	}
+	
+	# Combine testdata
+	foreach $testname (keys(%{$testdata1}))
+	{
+		if (defined($testdata2->{$testname}))
+		{
+			# testname is present in both entries, requires
+			# combination
+			($result_testdata{$testname}) =
+				add_counts($testdata1->{$testname},
+					   $testdata2->{$testname});
+		}
+		else
+		{
+			# testname only present in entry1, add to result
+			$result_testdata{$testname} = $testdata1->{$testname};
+		}
+
+		# update sum count hash
+		($result_sumcount, $lines_found, $lines_hit) =
+			add_counts($result_sumcount,
+				   $result_testdata{$testname});
+	}
+
+	foreach $testname (keys(%{$testdata2}))
+	{
+		# Skip testnames already covered by previous iteration
+		if (defined($testdata1->{$testname})) { next; }
+
+		# testname only present in entry2, add to result hash
+		$result_testdata{$testname} = $testdata2->{$testname};
+
+		# update sum count hash
+		($result_sumcount, $lines_found, $lines_hit) =
+			add_counts($result_sumcount,
+				   $result_testdata{$testname});
+	}
+	
+	# Resulting sumcount was accumulated incrementally in the loops above
+
+	# Store result
+	set_info_entry(\%result, \%result_testdata, $result_sumcount,
+		       \%result_funcdata, $checkdata1, $lines_found,
+		       $lines_hit);
+
+	return(\%result);
+}
+
+
+#
+# combine_info_files(info_ref1, info_ref2)
+#
+# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
+# reference to resulting hash.
+# Entries for the same filename are merged via combine_info_entries().
+
+sub combine_info_files($$)
+{
+	my %hash1 = %{$_[0]};
+	my %hash2 = %{$_[1]};
+	my $filename;
+
+	foreach $filename (keys(%hash2))
+	{
+		if ($hash1{$filename})
+		{
+			# Entry already exists in hash1, combine them
+			$hash1{$filename} =
+				combine_info_entries($hash1{$filename},
+						     $hash2{$filename},
+						     $filename);
+		}
+		else
+		{
+			# Entry is unique in both hashes, simply add to
+			# resulting hash
+			$hash1{$filename} = $hash2{$filename};
+		}
+	}
+
+	return(\%hash1);
+}
+
+
+#
+# apply_prefix(filename, prefix)
+#
+# If FILENAME begins with PREFIX, remove PREFIX from FILENAME and return
+# resulting string, otherwise return FILENAME.
+# PREFIX only matches when followed by "/" in FILENAME (whole path component).
+
+sub apply_prefix($$)
+{
+	my $filename = $_[0];
+	my $prefix = $_[1];
+
+	if (defined($prefix) && ($prefix ne ""))
+	{
+		if ($filename =~ /^\Q$prefix\E\/(.*)$/)
+		{
+			return substr($filename, length($prefix) + 1);
+		}
+	}
+
+	return $filename;
+}
+
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing output,
+# depending on the value of MODE:
+#
+#   MODE & 1: suppress STDOUT
+#   MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise.
+#
+
+sub system_no_output($@)
+{
+	my $mode = shift;
+	my $result;
+	local *OLD_STDERR;
+	local *OLD_STDOUT;
+
+	# Save old stdout and stderr handles
+	($mode & 1) && open(OLD_STDOUT, ">>&STDOUT");
+	($mode & 2) && open(OLD_STDERR, ">>&STDERR");
+
+	# Redirect to /dev/null
+	($mode & 1) && open(STDOUT, ">/dev/null");
+	($mode & 2) && open(STDERR, ">/dev/null");
+ 
+	system(@_);
+	$result = $?;
+
+	# Close redirected handles
+	($mode & 1) && close(STDOUT);
+	($mode & 2) && close(STDERR);
+
+	# Restore old handles
+	($mode & 1) && open(STDOUT, ">>&OLD_STDOUT");
+	($mode & 2) && open(STDERR, ">>&OLD_STDERR");
+ 
+	return $result;
+}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found.
+# Returns undef when FILENAME cannot be opened for reading.
+
+sub read_config($)
+{
+	my $filename = $_[0];
+	my %result;
+	my $key;
+	my $value;
+	local *HANDLE;
+
+	if (!open(HANDLE, "<$filename"))
+	{
+		warn("WARNING: cannot read configuration file $filename\n");
+		return undef;
+	}
+	while (<HANDLE>)
+	{
+		chomp;
+		# Skip comments
+		s/#.*//;
+		# Remove leading blanks
+		s/^\s+//;
+		# Remove trailing blanks
+		s/\s+$//;
+		next unless length;
+		($key, $value) = split(/\s*=\s*/, $_, 2);
+		if (defined($key) && defined($value))
+		{
+			$result{$key} = $value;
+		}
+		else
+		{
+			warn("WARNING: malformed statement in line $. ".
+			     "of configuration file $filename\n");
+		}
+	}
+	close(HANDLE);
+	return \%result;
+}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+#   key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hash CONFIG contains a value for
+# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
+# The global $config hash is populated by read_config().
+
+sub apply_config($)
+{
+	my $ref = $_[0];
+
+	foreach (keys(%{$ref}))
+	{
+		if (defined($config->{$_}))
+		{
+			${$ref->{$_}} = $config->{$_};
+		}
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/utils/lcov/geninfo	Tue Oct 03 10:20:55 2006 +0200
@@ -0,0 +1,1719 @@
+#!/usr/bin/perl -w
+#
+#   Copyright (c) International Business Machines  Corp., 2002
+#
+#   This program is free software;  you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or (at
+#   your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY;  without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.                 
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program;  if not, write to the Free Software
+#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# geninfo
+#
+#   This script generates .info files from data files as created by code
+#   instrumented with gcc's built-in profiling mechanism. Call it with
+#   --help and refer to the geninfo man page to get information on usage
+#   and available options.
+#
+#
+# Authors:
+#   2002-08-23 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#                         IBM Lab Boeblingen
+#        based on code by Manoj Iyer <manjo@mail.utexas.edu> and
+#                         Megan Bock <mbock@us.ibm.com>
+#                         IBM Austin
+#   2002-09-05 / Peter Oberparleiter: implemented option that allows file list
+#   2003-04-16 / Peter Oberparleiter: modified read_gcov so that it can also
+#                parse the new gcov format which is to be introduced in gcc 3.3
+#   2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
+#   2003-07-03 / Peter Oberparleiter: added line checksum support, added
+#                --no-checksum
+#   2003-09-18 / Nigel Hinds: capture branch coverage data from GCOV
+#   2003-12-11 / Laurent Deniel: added --follow option
+#                workaround gcov (<= 3.2.x) bug with empty .da files
+#   2004-01-03 / Laurent Deniel: Ignore empty .bb files
+#   2004-02-16 / Andreas Krebbel: Added support for .gcno/.gcda files and
+#                gcov versioning
+#   2004-08-09 / Peter Oberparleiter: added configuration file support
+#
+
+use strict;
+use File::Basename; 
+use Getopt::Long;
+use Digest::MD5 qw(md5_base64);
+
+
+# Constants
+our $lcov_version	= "LTP GCOV extension version 1.4";
+our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";
+our $gcov_tool		= "gcov";
+
+our $GCOV_VERSION_3_4_0	= 0x30400;
+our $GCOV_VERSION_3_3_0	= 0x30300;
+our $GCNO_FUNCTION_TAG	= 0x01000000;
+our $GCNO_LINES_TAG	= 0x01450000;
+our $GCNO_FILE_MAGIC	= 0x67636e6f;
+our $BBG_FILE_MAGIC	= 0x67626267;
+
+our $COMPAT_SLES9	= "sles9";
+
+# Prototypes
+sub print_usage(*);
+sub gen_info($);
+sub process_dafile($);
+sub match_filename($@);
+sub solve_ambiguous_match($$$);
+sub split_filename($);
+sub solve_relative_path($$);
+sub get_dir($);
+sub read_gcov_header($);
+sub read_gcov_file($);
+sub read_bb_file($);
+sub read_string(*$);
+sub read_gcno_file($);
+sub read_gcno_string(*$);
+sub read_sles9_bbg_file($);
+sub read_sles9_bbg_string(*$);
+sub unpack_int32($$);
+sub info(@);
+sub get_gcov_version();
+sub system_no_output($@);
+sub read_config($);
+sub apply_config($);
+
+# Global variables
+our $gcov_version;
+our $graph_file_extension;
+our $data_file_extension;
+our @data_directory;
+our $test_name = "";
+our $quiet;
+our $help;
+our $output_filename;
+our $version;
+our $follow;
+our $nochecksum;
+our $preserve_paths;
+our $adjust_testname = (`uname -m` =~ /^s390/); # Always enabled on s390
+our $config;		# Configuration file contents
+our $compatibility;	# Compatibility version flag - used to indicate
+			# non-standard GCOV data format versions
+
+our $cwd = `pwd`;
+chomp($cwd);
+
+
+#
+# Code entry point
+#
+
+# Register handler routine to be called when interrupted
+$SIG{"INT"} = \&int_handler;
+
+# Read configuration file if available
+if (-r $ENV{"HOME"}."/.lcovrc")
+{
+	$config = read_config($ENV{"HOME"}."/.lcovrc");
+}
+elsif (-r "/etc/lcovrc")
+{
+	$config = read_config("/etc/lcovrc");
+}
+
+if ($config)
+{
+	# Copy configuration file values to variables
+	apply_config({
+		"geninfo_gcov_tool"		=> \$gcov_tool,
+		"geninfo_adjust_testname"	=> \$adjust_testname,
+		"geninfo_no_checksum"		=> \$nochecksum});
+}
+
+# Parse command line options
+if (!GetOptions("test-name=s" => \$test_name,
+		"output-filename=s" => \$output_filename,
+		"no-checksum" => \$nochecksum,
+		"version" =>\$version,
+		"quiet" => \$quiet,
+		"help" => \$help,
+		"follow" => \$follow
+		))
+{
+	print_usage(*STDERR);
+	exit(1);
+}
+
+@data_directory = @ARGV;
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print($lcov_version."\n");
+	exit(0);
+}
+
+# Adjust test name if necessary (standard for s390 architecture)
+if ($adjust_testname)
+{
+	$test_name .= "__".`uname -a`;
+	$test_name =~ s/\W/_/g;
+}
+
+# Translate --follow flag into the corresponding find(1) option
+if ($follow)
+{
+	$follow = "-follow"
+}
+else
+{
+	$follow = "";
+}
+
+# Check for directory name
+if (!@data_directory)
+{
+	print(STDERR "No directory specified\n");
+	print_usage(*STDERR);
+	exit(1);
+}
+else
+{
+	foreach (@data_directory)
+	{
+		stat($_);
+		if (!-r _)
+		{
+			die("ERROR: cannot read $_!\n");
+		}
+	}
+}
+
+if (system_no_output(3, $gcov_tool, "--help") == -1)
+{
+	die("ERROR: need tool $gcov_tool!\n");
+}
+
+$gcov_version = get_gcov_version();
+
+if ($gcov_version < $GCOV_VERSION_3_4_0)
+{
+	if (defined($compatibility) && $compatibility eq $COMPAT_SLES9)
+	{
+		$data_file_extension = ".da";
+		$graph_file_extension = ".bbg";
+	}
+	else
+	{
+		$data_file_extension = ".da";
+		$graph_file_extension = ".bb";
+	}
+}
+else
+{
+	$data_file_extension = ".gcda";
+	$graph_file_extension = ".gcno";
+}	
+
+# Check for availability of --preserve-paths option of gcov
+if (`$gcov_tool --help` =~ /--preserve-paths/)
+{
+	$preserve_paths = "--preserve-paths";
+}
+
+# Check output filename
+if (defined($output_filename) && ($output_filename ne "-"))
+{
+	# Initially create output filename, data is appended
+	# for each data file processed
+	local *DUMMY_HANDLE;
+	open(DUMMY_HANDLE, ">$output_filename")
+		or die("ERROR: cannot create $output_filename!\n");
+	close(DUMMY_HANDLE);
+
+	# Make $output_filename an absolute path because we're going
+	# to change directories while processing files
+	if (!($output_filename =~ /^\/(.*)$/))
+	{
+		$output_filename = $cwd."/".$output_filename;
+	}
+}
+
+# Process every data directory given on the command line
+foreach (@data_directory)
+{
+	gen_info($_);
+}
+info("Finished .info-file creation\n");
+
+exit(0);
+
+
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+# HANDLE is the filehandle (STDOUT or STDERR) the text is written to.
+
+sub print_usage(*)
+{
+	local *HANDLE = $_[0];
+	my $tool_name = basename($0);
+
+	print(HANDLE <<END_OF_USAGE);
+Usage: $tool_name [OPTIONS] DIRECTORY
+
+Traverse DIRECTORY and create a .info file for each .da/.gcda file found. Note
+that you may specify more than one directory, all of which are then processed
+sequentially.
+
+  -h, --help                        Print this help, then exit
+  -v, --version                     Print version number, then exit
+  -q, --quiet                       Do not print progress messages
+  -t, --test-name NAME              Use test case name NAME for resulting data
+  -o, --output-filename OUTFILE     Write data only to OUTFILE
+  -f, --follow                      Follow links when searching .da/.gcda files
+  -n, --no-checksum                 Do not calculate checksum for each line
+
+See $lcov_url for more information about this tool.
+END_OF_USAGE
+	;
+}
+
+
+#
+# gen_info(directory)
+#
+# Traverse DIRECTORY and create a .info file for each data file found.
+# The .info file contains TEST_NAME in the following format:
+#
+#   TN:<test name>
+#
+# For each source file name referenced in the data file, there is a section
+# containing source code and coverage data:
+#
+#   SF:<absolute path to the source file>
+#   FN:<line number of function start>,<function name> for each function
+#   DA:<line number>,<execution count> for each instrumented line
+#   LH:<number of lines with an execution count> greater than 0
+#   LF:<number of instrumented lines>
+#
+# Sections are separated by:
+#
+#   end_of_record
+#
+# In addition to the main source code file there are sections for each
+# #included file containing executable code. Note that the absolute path
+# of a source file is generated by interpreting the contents of the respective
+# graph file. Relative filenames are prefixed with the directory in which the
+# graph file is found. Note also that symbolic links to the graph file will be
+# resolved so that the actual file path is used instead of the path to a link.
+# This approach is necessary for the mechanism to work with the /proc/gcov
+# files.
+#
+# Die on error (e.g. no data files found in DIRECTORY).
+#
+
+sub gen_info($)
+{
+	my $directory = $_[0];
+	my @file_list;
+
+	if (-d $directory)
+	{
+		info("Scanning $directory for $data_file_extension ".
+		     "files ...\n");	
+
+		@file_list = `find $directory $follow -name \\*$data_file_extension -type f 2>/dev/null`;
+		chomp(@file_list);
+		@file_list or die("ERROR: no $data_file_extension files found ".
+				  "in $directory!\n");
+		info("Found %d data files in %s\n", $#file_list+1, $directory);
+	}
+	else
+	{
+		@file_list = ($directory);
+	}
+
+	# Process all files in list
+	foreach (@file_list) { process_dafile($_); }
+}
+
+
+#
+# process_dafile(da_filename)
+#
+# Create a .info file for a single data file.
+#
+# Die on error.
+#
+
sub process_dafile($)
{
	info("Processing %s\n", $_[0]);

	my $da_filename;	# Name of data file to process
	my $da_dir;		# Directory of data file
	my $da_basename;	# data filename without ".da/.gcda" extension
	my $bb_filename;	# Name of respective graph file
	my %bb_content;		# Contents of graph file
	my $gcov_error;		# Error code of gcov tool
	my $object_dir;		# Directory containing all object files
	my $source_filename;	# Name of a source code file
	my $gcov_file;		# Name of a .gcov file
	my @gcov_content;	# Content of a .gcov file
	my @gcov_branches;	# Branch content of a .gcov file
	my @gcov_list;		# List of generated .gcov files
	my $line_number;	# Line number count
	my $lines_hit;		# Number of instrumented lines hit
	my $lines_found;	# Number of instrumented lines found
	my $source;		# gcov source header information
	my $object;		# gcov object header information
	my @matches;		# List of absolute paths matching filename
	my @unprocessed;	# List of unprocessed source code files
	my @result;		# (content ref, branch ref) from read_gcov_file()
	my $index;		# Index into @unprocessed
	my $da_renamed;		# If data file is to be renamed
	local *INFO_HANDLE;

	# Get path to data file in absolute and normalized form (begins with /,
	# contains no more ../ or ./)
	$da_filename = solve_relative_path($cwd, $_[0]);

	# Get directory and basename of data file
	($da_dir, $da_basename) = split_filename($da_filename);

	# Check for writable $da_dir (gcov will try to write files there)
	stat($da_dir);
	if (!-w _)
	{
		die("ERROR: cannot write to directory $da_dir!\n");
	}

	if (-z $da_filename)
	{
		$da_renamed = 1;
	}
	else
	{
		$da_renamed = 0;
	}

	# Construct name of graph file
	$bb_filename = $da_dir."/".$da_basename.$graph_file_extension;


	# Find out the real location of graph file in case we're just looking at
	# a link
	# NOTE(review): a circular symlink chain would make this loop forever
	while (readlink($bb_filename))
	{
		$bb_filename = readlink($bb_filename);
	}

	# Ignore empty graph file (e.g. source file with no statement)
	if (-z $bb_filename)
	{
		warn("WARNING: empty $bb_filename (skipped)\n");
		return;
	}

	# Read contents of graph file into hash. We need it later to find out
	# the absolute path to each .gcov file created as well as for
	# information about functions and their source code positions.
	if ($gcov_version < $GCOV_VERSION_3_4_0)
	{
		if (defined($compatibility) && $compatibility eq $COMPAT_SLES9)
		{
			%bb_content = read_sles9_bbg_file($bb_filename);
		}
		else
		{
			%bb_content = read_bb_file($bb_filename);
		}
	}
	else
	{
		%bb_content = read_gcno_file($bb_filename);
	}

	# Set $object_dir to real location of object files. This may differ
	# from $da_dir if the graph file is just a link to the "real" object
	# file location. We need to apply GCOV with using that directory to
	# ensure that all relative #include-files are found as well.
	($object_dir) = split_filename($bb_filename);

	# Is the data file in the same directory with all the other files?
	if ($object_dir ne $da_dir)
	{
		# Need to create link to data file in $object_dir
		system("ln", "-s", $da_filename, 
		       "$object_dir/$da_basename$data_file_extension")
			and die ("ERROR: cannot create link $object_dir/".
				 "$da_basename$data_file_extension!\n");
	}

	# Change to directory containing data files and apply GCOV
	# NOTE(review): left disabled; the glob("*.gcov") below therefore
	# collects gcov's output from the current working directory
	#chdir($object_dir);

	if ($da_renamed)
	{
		# Need to rename empty data file to workaround
	        # gcov <= 3.2.x bug (Abort)
		system_no_output(3, "mv", "$da_filename", "$da_filename.ori")
			and die ("ERROR: cannot rename $da_filename\n");
	}

	# Execute gcov command and suppress standard output
	# NOTE(review): "$da_basename.c" assumes C naming; gcov resolves the
	# actual source name via the graph file -- confirm for .cc/.cpp inputs
	if ($preserve_paths)
	{
		$gcov_error = system_no_output(1, $gcov_tool, $da_basename.".c",
					       "-o", $object_dir,
					       "--preserve-paths",
					       "-b");
	}
	else
	{
		$gcov_error = system_no_output(1, $gcov_tool, $da_basename.".c",
					       "-o", $object_dir,
					       "-b");
	}

	if ($da_renamed)
	{
		system_no_output(3, "mv", "$da_filename.ori", "$da_filename")
			and die ("ERROR: cannot rename $da_filename.ori");
	}

	# Clean up link
	if ($object_dir ne $da_dir)
	{
		unlink($object_dir."/".$da_basename.$data_file_extension);
	}

	$gcov_error and die("ERROR: GCOV failed for $da_filename!\n");

	# Collect data from resulting .gcov files and create .info file
	@gcov_list = glob("*.gcov");

	# Check for files
	if (!@gcov_list)
	{
		warn("WARNING: gcov did not create any files for ".
		     "$da_filename!\n");
	}

	# Check whether we're writing to a single file
	if ($output_filename)
	{
		if ($output_filename eq "-")
		{
			*INFO_HANDLE = *STDOUT;
		}
		else
		{
			# Append to output file
			open(INFO_HANDLE, ">>$output_filename")
				or die("ERROR: cannot write to ".
				       "$output_filename!\n");
		}
	}
	else
	{
		# Open .info file for output
		open(INFO_HANDLE, ">$da_filename.info")
			or die("ERROR: cannot create $da_filename.info!\n");
	}

	# Write test name
	printf(INFO_HANDLE "TN:%s\n", $test_name);

	# Traverse the list of generated .gcov files and combine them into a
	# single .info file
	@unprocessed = keys(%bb_content);
	foreach $gcov_file (@gcov_list)
	{
		($source, $object) = read_gcov_header($gcov_file);

		if ($source)
		{
			$source = solve_relative_path($object_dir, $source);
		}

		# gcov will happily create output even if there's no source code
		# available - this interferes with checksum creation so we need
		# to pull the emergency brake here.
		if (defined($source) && ! -r $source && ! $nochecksum)
		{
			die("ERROR: could not read source file $source\n");
		}

		@matches = match_filename(defined($source) ? $source :
					  $gcov_file, keys(%bb_content));

		# Skip files that are not mentioned in the graph file
		if (!@matches)
		{
			warn("WARNING: cannot find an entry for ".$gcov_file.
			     " in $graph_file_extension file, skipping ".
			     "file!\n");
			unlink($gcov_file);
			next;
		}

		# Read in contents of gcov file
		@result = read_gcov_file($gcov_file);
		@gcov_content = @{$result[0]};
		@gcov_branches = @{$result[1]};

		# Skip empty files
		if (!@gcov_content)
		{
			warn("WARNING: skipping empty file ".$gcov_file."\n");
			unlink($gcov_file);
			next;
		}

		if (scalar(@matches) == 1)
		{
			# Just one match
			$source_filename = $matches[0];
		}
		else
		{
			# Try to solve the ambiguity
			$source_filename = solve_ambiguous_match($gcov_file,
						\@matches, \@gcov_content);
		}

		# Remove processed file from list
		for ($index = scalar(@unprocessed) - 1; $index >= 0; $index--)
		{
			if ($unprocessed[$index] eq $source_filename)
			{
				splice(@unprocessed, $index, 1);
				last;
			}
		}

		# Write absolute path of source file
		printf(INFO_HANDLE "SF:%s\n", $source_filename);

		# Write function-related information
		foreach (split(",",$bb_content{$source_filename}))
		{
			# Write "line_number,function_name" for each function.
			# Note that $_ contains this information in the form
			# "function_name=line_number" so that the order of
			# elements has to be reversed.
			printf(INFO_HANDLE "FN:%s\n",
			       join(",", (split("=", $_))[1,0]));
		}

		# Reset line counters
		$line_number = 0;
		$lines_found = 0;
		$lines_hit = 0;

		# Write coverage information for each instrumented line
		# Note: @gcov_content contains a list of (flag, count, source)
		# tuple for each source code line
		while (@gcov_content)
		{
			$line_number++;

			# Check for instrumented line
			if ($gcov_content[0])
			{
				$lines_found++;
				printf(INFO_HANDLE "DA:".$line_number.",".
				       $gcov_content[1].($nochecksum ? "" :
				       ",". md5_base64($gcov_content[2])).
				       "\n");

				# Increase $lines_hit in case of an execution
				# count>0
				if ($gcov_content[1] > 0) { $lines_hit++; }
			}

			# Remove already processed data from array
			splice(@gcov_content,0,3);
		}

		#--
		#-- BA: <code-line>, <branch-coverage>
		#--
		#-- print one BA line for every branch of a
		#-- conditional.  <branch-coverage> values
		#-- are:
		#--     0 - not executed
		#--     1 - executed but not taken
		#--     2 - executed and taken
		#--
		while (@gcov_branches)
		{
			if ($gcov_branches[0])
			{
				printf(INFO_HANDLE "BA:%s,%s\n",
				       $gcov_branches[0],
				       $gcov_branches[1]);
			}
			splice(@gcov_branches,0,2);
		}

		# Write line statistics and section separator
		printf(INFO_HANDLE "LF:%s\n", $lines_found);
		printf(INFO_HANDLE "LH:%s\n", $lines_hit);
		print(INFO_HANDLE "end_of_record\n");

		# Remove .gcov file after processing
		unlink($gcov_file);
	}

	# Check for files which show up in the graph file but were never
	# processed
	if (@unprocessed && @gcov_list)
	{
		foreach (@unprocessed)
		{
			warn("WARNING: no data found for $_\n");
		}
	}

	if (!($output_filename && ($output_filename eq "-")))
	{
		close(INFO_HANDLE);
	}

	# Change back to initial directory
	chdir($cwd);
}
+
+
+#
+# solve_relative_path(path, dir)
+#
+# Solve relative path components of DIR which, if not absolute, resides in PATH.
+#
+
sub solve_relative_path($$)
{
	my ($base, $target) = @_;
	my $normalized = $target;

	# Anchor a relative target below the base path
	$normalized = "$base/$normalized" if ($normalized =~ m{^[^/]});

	# Collapse duplicate slashes
	$normalized =~ s{//}{/}g;

	# Drop "./" path components
	$normalized =~ s{/\./}{/}g;

	# Fold "component/../" sequences until none remain
	1 while ($normalized =~ s{/[^/]+/\.\./}{/});

	# A leading "/../" cannot be resolved any further - strip it
	$normalized =~ s{^/\.\./}{/}g;

	return $normalized;
}
+
+
+#
+# match_filename(gcov_filename, list)
+#
+# Return a list of those entries of LIST which match the relative filename
+# GCOV_FILENAME.
+#
+
#
# match_filename(gcov_filename, list)
#
# Return a list of those entries of LIST which match the relative filename
# GCOV_FILENAME.
#

sub match_filename($@)
{
	my $filename = shift;
	my @list = @_;
	my @result;

	# Strip the ".gcov" suffix. The dot must be escaped so that a
	# name such as "foo_gcov" is not truncated as well.
	$filename =~ s/^(.*)\.gcov$/$1/;

	# Drop a leading slash so the name can be matched as a path
	# suffix below
	if ($filename =~ /^\/(.*)$/)
	{
		$filename = "$1";
	}

	# Keep every list entry whose path ends in "/<filename>"
	foreach (@list)
	{
		if (/\/\Q$filename\E(.*)$/ && $1 eq "")
		{
			@result = (@result, $_);
		}
	}
	return @result;
}
+
+
+#
+# solve_ambiguous_match(rel_filename, matches_ref, gcov_content_ref)
+#
+# Try to solve ambiguous matches of mapping (gcov file) -> (source code) file
+# by comparing source code provided in the GCOV file with that of the files
+# in MATCHES. REL_FILENAME identifies the relative filename of the gcov
+# file.
+# 
+# Return the one real match or die if there is none.
+#
+
sub solve_ambiguous_match($$$)
{
	my $rel_name = $_[0];	# Relative name of the .gcov file
	my $matches = $_[1];	# Ref to list of candidate source paths
	my $content = $_[2];	# Ref to flat (flag, count, text) triples
	my $filename;
	my $index;
	my $no_match;
	local *SOURCE;

	# Check the list of matches
	foreach $filename (@$matches)
	{

		# Compare file contents
		open(SOURCE, $filename)
			or die("ERROR: cannot read $filename!\n");

		$no_match = 0;
		# The readline in the for-condition assigns to $_ (see
		# perlop). $index starts at 2 and advances by 3 to address
		# the source-text element of each (flag, count, text)
		# triple in @$content.
		for ($index = 2; <SOURCE>; $index += 3)
		{
			chomp;

			if ($_ ne @$content[$index])
			{
				$no_match = 1;
				last;
			}
		}

		close(SOURCE);

		if (!$no_match)
		{
			info("Solved source file ambiguity for $rel_name\n");
			return $filename;
		}
	}

	die("ERROR: could not match gcov data for $rel_name!\n");
}
+
+
+#
+# split_filename(filename)
+#
+# Return (path, filename, extension) for a given FILENAME.
+#
+
sub split_filename($)
{
	# Separate the directory part from the filename, then split the
	# filename at its last dot to obtain the extension
	my @dir_parts = split('/', $_[0]);
	my @name_parts = split('\.', pop(@dir_parts));
	my $suffix = pop(@name_parts);
	my $dir = join("/", @dir_parts);
	my $base = join(".", @name_parts);

	return ($dir, $base, $suffix);
}
+
+
+#
+# get_dir(filename);
+#
+# Return the directory component of a given FILENAME.
+#
+
sub get_dir($)
{
	# Split into path elements and discard the trailing one
	# (the file component)
	my @parts = split("/", $_[0]);
	pop(@parts);

	return join("/", @parts);
}
+
+
+#
+# read_gcov_header(gcov_filename)
+#
+# Parse file GCOV_FILENAME and return a list containing the following
+# information:
+#
+#   (source, object)
+#
+# where:
+#
+# source: complete relative path of the source code file (gcc >= 3.3 only)
+# object: name of associated graph file
+#
+# Die on error.
+#
+
sub read_gcov_header($)
{
	my $source;
	my $object;
	local *INPUT;

	open(INPUT, $_[0])
		or die("ERROR: cannot read $_[0]!\n");

	# Header entries appear at the top of the file; stop scanning
	# at the first line that is neither Source: nor Object:
	while (my $line = <INPUT>)
	{
		chomp($line);

		if ($line =~ /^\s+-:\s+0:Source:(.*)$/)
		{
			$source = $1;
		}
		elsif ($line =~ /^\s+-:\s+0:Object:(.*)$/)
		{
			$object = $1;
		}
		else
		{
			last;
		}
	}

	close(INPUT);

	return ($source, $object);
}
+
+
+#
+# read_gcov_file(gcov_filename)
+#
+# Parse file GCOV_FILENAME (.gcov file format) and return the list:
+# (reference to gcov_content, reference to gcov_branch)
+#
+# gcov_content is a list of 3 elements
+# (flag, count, source) for each source code line:
+#
+# $result[($line_number-1)*3+0] = instrumentation flag for line $line_number
+# $result[($line_number-1)*3+1] = execution count for line $line_number
+# $result[($line_number-1)*3+2] = source code text for line $line_number
+#
+# gcov_branch is a list of 2 elements
+# (linenumber, branch result) for each branch
+#
+# Die on error.
+#
+
sub read_gcov_file($)
{
	my $filename = $_[0];
	my @result = ();
	my @branches = ();
	my $number;		# Execution count parsed from the current line
	local *INPUT;

	open(INPUT, $filename)
		or die("ERROR: cannot read $filename!\n");

	if ($gcov_version < $GCOV_VERSION_3_3_0)
	{
		# Expect gcov format as used in gcc < 3.3
		while (<INPUT>)
		{
			chomp($_);

			if (/^\t\t(.*)$/)
			{
				# Uninstrumented line
				push(@result, 0);
				push(@result, 0);
				push(@result, $1);
			}
			elsif (/^branch/)
			{
				# Branch execution data
				# @result holds one triple per source line, so
				# this records the current source line number
				push(@branches, scalar(@result) / 3);
				if (/^branch \d+ never executed$/)
				{
					push(@branches, 0);
				}
				elsif (/^branch \d+ taken = 0%/)
				{
					push(@branches, 1);
				}
				else
				{
					push(@branches, 2);
				}
			}
			elsif (/^call/)
			{
				# Function call return data
			}
			else
			{
				# Source code execution data
				# (count presumably lives in the fixed-width
				# first 16 columns -- TODO confirm for all
				# gcc < 3.3 variants)
				$number = (split(" ",substr($_, 0, 16)))[0];

				# Check for zero count which is indicated
				# by ######
				if ($number eq "######") { $number = 0;	}

				push(@result, 1);
				push(@result, $number);
				push(@result, substr($_, 16));
			}
		}
	}
	else
	{
		# Expect gcov format as used in gcc >= 3.3
		while (<INPUT>)
		{
			chomp($_);

			if (/^branch\s+\d+\s+(\S+)\s+(\S+)/)
			{
				# Branch execution data
				push(@branches, scalar(@result) / 3);
				if ($1 eq "never")
				{
					push(@branches, 0);
				}
				elsif ($2 eq "0%")
				{
					push(@branches, 1);
				}
				else
				{
					push(@branches, 2);
				}
			}
			elsif (/^call/)
			{
				# Function call return data
			}
			elsif (/^\s*([^:]+):\s*([^:]+):(.*)$/)
			{
				# <exec count>:<line number>:<source code>
				if ($2 eq "0")
				{
					# Extra data (line number 0 carries
					# header information, not source)
				}
				elsif ($1 eq "-")
				{
					# Uninstrumented line
					push(@result, 0);
					push(@result, 0);
					push(@result, $3);
				}
				else
				{
					# Source code execution data
					$number = $1;

					# Check for zero count
					if ($number eq "#####")	{ $number = 0; }

					push(@result, 1);
					push(@result, $number);
					push(@result, $3);
				}
			}
		}
	}

	close(INPUT);
	return(\@result, \@branches);
}
+
+
+#
+# read_bb_file(bb_filename)
+#
+# Read .bb file BB_FILENAME and return a hash containing the following
+# mapping:
+#
+#   filename -> comma-separated list of pairs (function name=starting
+#               line number) for each function found
+#
+# for each entry in the .bb file. Filenames are absolute, i.e. relative
+# filenames are prefixed with bb_filename's path component.
+#
+# Die on error.
+#
+
sub read_bb_file($)
{
	my $bb_filename		= $_[0];
	my %result;
	my $filename;
	my $function_name;
	my $cwd			= `pwd`;
	chomp($cwd);
	my $base_dir		= get_dir(solve_relative_path(
					  $cwd, $bb_filename));
	# Record tags marking a filename/function entry. These are
	# formatted with the same sprintf("%d", ...) conversion that
	# unpack_int32() applies, so the == comparisons below agree
	# on both 32- and 64-bit perls.
	my $minus_one		= sprintf("%d", 0x80000001);
	my $minus_two		= sprintf("%d", 0x80000002);
	my $value;
	my $packed_word;
	local *INPUT;

	open(INPUT, $bb_filename)
		or die("ERROR: cannot read $bb_filename!\n");

	binmode(INPUT);

	# Read data in words of 4 bytes
	while (read(INPUT, $packed_word, 4) == 4)
	{
		# Decode integer in intel byteorder
		$value = unpack_int32($packed_word, 0);

		# Note: the .bb file format is documented in GCC info pages
		if ($value == $minus_one)
		{
			# Filename follows
			$filename = read_string(*INPUT, $minus_one)
				or die("ERROR: incomplete filename in ".
				       "$bb_filename!\n");

			# Make path absolute
			$filename = solve_relative_path($base_dir, $filename);

			# Insert into hash if not yet present.
			# This is necessary because functions declared as
			# "inline" are not listed as actual functions in
			# .bb files
			if (!$result{$filename})
			{
				$result{$filename}="";
			}
		}
		elsif ($value == $minus_two)
		{
			# Function name follows
			$function_name = read_string(*INPUT, $minus_two)
				 or die("ERROR: incomplete function ".
					"name in $bb_filename!\n");
		}
		elsif ($value > 0)
		{
			if ($function_name)
			{
				# Got a full entry filename, funcname, lineno
				# Add to resulting hash

				$result{$filename}.=
				  ($result{$filename} ? "," : "").
				  join("=",($function_name,$value));
				undef($function_name);
			}
		}
	}
	close(INPUT);

	if (!scalar(keys(%result)))
	{
		die("ERROR: no data found in $bb_filename!\n");
	}
	return %result;
}
+
+
+#
+# read_string(handle, delimiter);
+#
+# Read and return a string in 4-byte chunks from HANDLE until DELIMITER
+# is found.
+#
+# Return empty string on error.
+#
+
sub read_string(*$)
{
	my $HANDLE	= $_[0];	# Open filehandle to read from
	my $delimiter	= $_[1];	# Decoded word value ending the string
	my $string	= "";
	my $packed_word;
	my $value;

	while (read($HANDLE,$packed_word,4) == 4)
	{
		$value = unpack_int32($packed_word, 0);

		if ($value == $delimiter)
		{
			# Strip the zero bytes padding the last 4-byte
			# chunk. Using a substitution instead of
			# temporarily reassigning the global input record
			# separator ($/) avoids clobbering a caller-set
			# value of $/.
			$string =~ s/\0+\z//;
			return($string);
		}

		$string = $string.$packed_word;
	}
	# EOF was reached before the delimiter was found
	return("");
}
+
+
+#
+# read_gcno_file(bb_filename)
+#
+# Read .gcno file BB_FILENAME and return a hash containing the following
+# mapping:
+#
+#   filename -> comma-separated list of pairs (function name=starting
+#               line number) for each function found
+#
+# for each entry in the .gcno file. Filenames are absolute, i.e. relative
+# filenames are prefixed with bb_filename's path component.
+#
+# Die on error.
+#
+
sub read_gcno_file($)
{
	my $gcno_filename	= $_[0];
	my %result;
	my $filename;
	my $function_name;
	my $lineno;
	my $length;		# Record length in 4-byte words
	my $cwd			= `pwd`;
	my $value;
	my $endianness;		# 0=little, 1=big
	my $blocks;
	chomp($cwd);
	my $base_dir		= get_dir(solve_relative_path(
						$cwd, $gcno_filename));
	my $packed_word;
	local *INPUT;

	open(INPUT, $gcno_filename)
		or die("ERROR: cannot read $gcno_filename!\n");

	binmode(INPUT);
	
	read(INPUT, $packed_word, 4) == 4
		or die("ERROR: Invalid gcno file format\n");

	$value = unpack_int32($packed_word, 0);
	# If the magic does not match in little-endian order, the file
	# must be big-endian; the check below verifies this
	$endianness = !($value == $GCNO_FILE_MAGIC);

	unpack_int32($packed_word, $endianness) == $GCNO_FILE_MAGIC
		or die("ERROR: gcno file magic does not match\n");

	# Skip the remaining two header words (presumably version and
	# stamp -- TODO confirm against gcov-io.h)
	seek(INPUT, 8, 1);

	# Read data in words of 4 bytes
	while (read(INPUT, $packed_word, 4) == 4)
	{
		# Decode integer in intel byteorder
		$value = unpack_int32($packed_word, $endianness);

		if ($value == $GCNO_FUNCTION_TAG)
		{
			# skip length, ident and checksum
			seek(INPUT, 12, 1);
			(undef, $function_name) =
				read_gcno_string(*INPUT, $endianness);
			(undef, $filename) =
				read_gcno_string(*INPUT, $endianness);
			$filename = solve_relative_path($base_dir, $filename);

			read(INPUT, $packed_word, 4);
			$lineno = unpack_int32($packed_word, $endianness);

			$result{$filename}.=
			    ($result{$filename} ? "," : "").
				join("=",($function_name,$lineno));
		}
		elsif ($value == $GCNO_LINES_TAG)
		{
			# Check for names of files containing inlined code
			# included in this file
			read(INPUT, $packed_word, 4);
			$length = unpack_int32($packed_word, $endianness);
			while ($length > 0)
			{
				read(INPUT, $packed_word, 4);
				$lineno = unpack_int32($packed_word,
						       $endianness);
				$length--;
				# A zero line number announces a filename
				if ($lineno != 0)
				{
					next;
				}
				($blocks, $filename) =
					read_gcno_string(*INPUT, $endianness);
				if ($blocks > 1)
				{
					$filename = solve_relative_path(
							$base_dir, $filename);
					if (!defined($result{$filename}))
					{
						$result{$filename} = "";
					}
				}
				$length -= $blocks;
			}
		}
		else
		{
			# Skip records of any other type: a length word
			# followed by that many 4-byte data words
			read(INPUT, $packed_word, 4);
			$length = unpack_int32($packed_word, $endianness);
			seek(INPUT, 4 * $length, 1);
		}
	}
	close(INPUT);

	if (!scalar(keys(%result)))
	{
		die("ERROR: no data found in $gcno_filename!\n");
	}
	return %result;
}
+
+
+#
+# read_gcno_string(handle, endianness);
+#
+# Read a string in 4-byte chunks from HANDLE.
+#
+# Return (number of 4-byte chunks read, string).
+#
+
sub read_gcno_string(*$)
{
	my $handle		= $_[0];	# Open filehandle to read from
	my $endianness		= $_[1];	# 0=little, 1=big
	my $number_of_blocks	= 0;
	my $string		= "";
	my $packed_word;

	read($handle, $packed_word, 4) == 4
		or die("ERROR: reading string\n");

	$number_of_blocks = unpack_int32($packed_word, $endianness);

	# A zero block count denotes an absent string
	if ($number_of_blocks == 0)
	{
		return (1, undef);
	}

	read($handle, $packed_word, 4 * $number_of_blocks) ==
		4 * $number_of_blocks or die("ERROR: reading string\n");

	$string = $string . $packed_word;

	# Strip the zero bytes padding the last 4-byte block. Using a
	# substitution instead of temporarily reassigning the global
	# input record separator ($/) avoids clobbering a caller-set
	# value of $/.
	$string =~ s/\0+\z//;

	return(1 + $number_of_blocks, $string);
}
+
+
+#
+# read_sles9_bbg_file(bb_filename)
+#
+# Read .bbg file BB_FILENAME and return a hash containing the following
+# mapping:
+#
+#   filename -> comma-separated list of pairs (function name=starting
+#               line number) for each function found
+#
+# for each entry in the .bbg file. Filenames are absolute, i.e. relative
+# filenames are prefixed with bb_filename's path component.
+#
+# Die on error.
+#
+
sub read_sles9_bbg_file($)
{
	my $bbg_filename = $_[0];
	my %result;
	my $filename;
	my $function_name;
	my $first_line;		# First line number seen for current function
	my $lineno;
	my $length;		# Record length in bytes
	my $cwd = `pwd`;
	my $value;
	my $endianness;		# 0=little, 1=big
	my $blocks;
	chomp($cwd);
	my $base_dir = get_dir(solve_relative_path($cwd, $bbg_filename));
	my $packed_word;
	local *INPUT;

	open(INPUT, $bbg_filename)
		or die("ERROR: cannot read $bbg_filename!\n");

	binmode(INPUT);
	
	read(INPUT, $packed_word, 4) == 4
		or die("ERROR: invalid bbg file format\n");

	$value = unpack_int32($packed_word, 0);
	# SLES9 .bbg files are expected to be big-endian; the magic
	# check below verifies this assumption
	$endianness = 1;

	unpack_int32($packed_word, $endianness) == $BBG_FILE_MAGIC
		or die("ERROR: bbg file magic does not match\n");

	# Skip one remaining header word
	seek(INPUT, 4, 1);

	# Read data in words of 4 bytes
	while (read(INPUT, $packed_word, 4) == 4)
	{
		# Decode integer in intel byteorder
		$value = unpack_int32($packed_word, $endianness);

		# Get record length
		read(INPUT, $packed_word, 4);
		$length = unpack_int32($packed_word, $endianness);

		if ($value == $GCNO_FUNCTION_TAG)
		{
			# Get function name
			($value, $function_name) =
				read_sles9_bbg_string(*INPUT, $endianness);

			# Skip the rest of the function record
			seek(INPUT, $length - $value * 4, 1);
		}
		elsif ($value == $GCNO_LINES_TAG)
		{
			# Get linenumber and filename

			# Skip block number
			seek(INPUT, 4, 1);
			$length -= 4;

			while ($length > 0)
			{
				read(INPUT, $packed_word, 4);
				$lineno = unpack_int32($packed_word,
						       $endianness);
				$length -= 4;
				# A zero line number announces a filename
				if ($lineno != 0)
				{
					if (!defined($first_line))
					{
						$first_line = $lineno;
					}
					next;
				}
				($blocks, $value) =
					read_sles9_bbg_string(
						*INPUT, $endianness);
				if (!defined($filename))
				{
					$filename = $value;
				}
				$length -= $blocks * 4;
			}
			# Got a complete data set?
			if (defined($filename) && defined($function_name) &&
			    defined($first_line))
			{
				$filename = solve_relative_path(
						$base_dir, $filename);
				# Add it to our result hash
				if (defined($result{$filename}))
				{
					$result{$filename} .=
						",$function_name=$first_line";
				}
				else
				{
					$result{$filename} =
						"$function_name=$first_line";
				}
				$filename = undef;
				$function_name = undef;
				$first_line = undef;
			}
		}
		else
		{
			# Skip other records
			seek(INPUT, $length, 1);
		}
	}
	close(INPUT);

	if (!scalar(keys(%result)))
	{
		die("ERROR: no data found in $bbg_filename!\n");
	}
	return %result;
}
+
+
+#
+# read_sles9_bbg_string(handle, endianness);
+#
+# Read a string in 4-byte chunks from HANDLE.
+#
+# Return (number of 4-byte chunks read, string).
+#
+
sub read_sles9_bbg_string(*$)
{
	my $handle	= $_[0];	# Open filehandle to read from
	my $endianness	= $_[1];	# 0=little, 1=big
	my $string	= "";
	my $packed_word;

	# The record starts with the string length in bytes
	read($handle, $packed_word, 4) == 4
		or die("ERROR: reading string\n");

	my $length = unpack_int32($packed_word, $endianness);
	# Strings are padded to the next 4-byte boundary; a length that
	# is already a multiple of 4 gets a full extra word of padding
	my $pad = 4 - $length % 4;

	# A zero length denotes an absent string
	if ($length == 0)
	{
		return (1, undef);
	}

	read($handle, $string, $length) == $length
		or die("ERROR: reading string\n");
	seek($handle, $pad, 1);

	return(1 + ($length + $pad) / 4, $string);
}
+
+#
+# unpack_int32(word, endianness)
+#
+# Interpret 4-byte binary string WORD as signed 32 bit integer in
+# endian encoding defined by ENDIANNESS (0=little, 1=big) and return its
+# value.
+#
+
sub unpack_int32($$)
{
	my ($word, $big_endian) = @_;

	# "N" decodes 32-bit big-endian, "V" 32-bit little-endian;
	# sprintf normalizes the result to a decimal string
	my $format = $big_endian ? "N" : "V";
	return sprintf("%d", unpack($format, $word));
}
+
+
+#
+# Get the GCOV tool version. Return an integer number which represents the
+# GCOV version. Version numbers can be compared using standard integer
+# operations.
+#
+
sub get_gcov_version()
{
	# Localize the pipe handle so it does not leak into or clobber
	# a package-global handle of the same name (the previous code
	# localized an unused *HANDLE instead)
	local *GCOV_PIPE;
	my $version_string;
	my $result;

	# Note: $gcov_tool may contain extra options, so the command is
	# deliberately run through the shell here
	open(GCOV_PIPE, "$gcov_tool -v |")
		or die("ERROR: cannot retrieve gcov version!\n");
	$version_string = <GCOV_PIPE>;
	close(GCOV_PIPE);

	# Guard against a tool that prints nothing on -v (avoids an
	# uninitialized-value warning below; the result stays 0)
	$version_string = "" if (!defined($version_string));

	$result = 0;
	if ($version_string =~ /(\d+)\.(\d+)(\.(\d+))?/)
	{
		if (defined($4))
		{
			info("Found gcov version: $1.$2.$4\n");
			# Encode major.minor.patch into one comparable int
			$result = $1 << 16 | $2 << 8 | $4;
		}
		else
		{
			info("Found gcov version: $1.$2\n");
			$result = $1 << 16 | $2 << 8;
		}
	}
	# SUSE's GCC 3.3.3 needs special treatment of its coverage data
	if ($version_string =~ /suse/i && $result == 0x30303)
	{
		info("Using compatibility mode for SUSE GCC 3.3.3\n");
		$compatibility = $COMPAT_SLES9;
	}
	return $result;
}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
sub info(@)
{
	# Stay silent when the quiet flag is set
	return if ($quiet);

	if (defined($output_filename) && ($output_filename eq "-"))
	{
		# The .info data goes to STDOUT, so route progress
		# messages to STDERR instead of interfering with it
		printf(STDERR @_);
	}
	else
	{
		printf(@_);
	}
}
+
+
+#
+# int_handler()
+#
+# Called when the script was interrupted by an INT signal (e.g. Ctrl-C)
+#
+
sub int_handler()
{
	# Restore the original working directory before terminating
	chdir($cwd) if ($cwd);
	info("Aborted.\n");
	exit(1);
}
+
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing depending on
+# the value of MODE:
+#
+#   MODE & 1: suppress STDOUT
+#   MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise.
+#
+
sub system_no_output($@)
{
	my $mode = shift;
	my $status;
	local *OLD_STDERR;
	local *OLD_STDOUT;

	# Duplicate the handles that are about to be redirected
	if ($mode & 1) { open(OLD_STDOUT, ">>&STDOUT"); }
	if ($mode & 2) { open(OLD_STDERR, ">>&STDERR"); }

	# Point the selected handles at /dev/null
	if ($mode & 1) { open(STDOUT, ">/dev/null"); }
	if ($mode & 2) { open(STDERR, ">/dev/null"); }

	system(@_);
	$status = $?;

	# Close the redirected handles
	if ($mode & 1) { close(STDOUT); }
	if ($mode & 2) { close(STDERR); }

	# Reinstate the duplicated originals
	if ($mode & 1) { open(STDOUT, ">>&OLD_STDOUT"); }
	if ($mode & 2) { open(STDERR, ">>&OLD_STDERR"); }

	return $status;
}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found.
+#
+
sub read_config($)
{
	my $filename = $_[0];
	my %result;
	my ($key, $value);
	local *HANDLE;

	if (!open(HANDLE, "<$filename"))
	{
		warn("WARNING: cannot read configuration file $filename\n");
		return undef;
	}
	while (my $line = <HANDLE>)
	{
		chomp($line);
		$line =~ s/#.*//;	# strip comments
		$line =~ s/^\s+//;	# strip leading whitespace
		$line =~ s/\s+$//;	# strip trailing whitespace
		next if ($line eq "");

		# Accept only lines of the form "key = value"
		($key, $value) = split(/\s*=\s*/, $line, 2);
		if (defined($key) && defined($value))
		{
			$result{$key} = $value;
		}
		else
		{
			warn("WARNING: malformed statement in line $. ".
			     "of configuration file $filename\n");
		}
	}
	close(HANDLE);
	return \%result;
}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+#   key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hash CONFIG contains a value for
+# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword. 
+#
+
sub apply_config($)
{
	my $ref = $_[0];

	# Overwrite each referenced variable for which the global
	# configuration hash provides a value
	foreach my $keyword (keys(%{$ref}))
	{
		next if (!defined($config->{$keyword}));
		${$ref->{$keyword}} = $config->{$keyword};
	}
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/utils/lcov/lcov	Tue Oct 03 10:20:55 2006 +0200
@@ -0,0 +1,2268 @@
+#!/usr/bin/perl -w
+#
+#   Copyright (c) International Business Machines  Corp., 2002
+#
+#   This program is free software;  you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or (at
+#   your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY;  without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.                 
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program;  if not, write to the Free Software
+#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# lcov
+#
+#   This is a wrapper script which provides a single interface for accessing
+#   LCOV coverage data.
+#
+#
+# History:
+#   2002-08-29 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#                         IBM Lab Boeblingen
+#   2002-09-05 / Peter Oberparleiter: implemented --kernel-directory +
+#                multiple directories
+#   2002-10-16 / Peter Oberparleiter: implemented --add-tracefile option
+#   2002-10-17 / Peter Oberparleiter: implemented --extract option
+#   2002-11-04 / Peter Oberparleiter: implemented --list option
+#   2003-03-07 / Paul Larson: Changed to make it work with the latest gcov 
+#                kernel patch.  This will break it with older gcov-kernel
+#                patches unless you change the value of $gcovmod in this script
+#   2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
+#                when trying to combine .info files containing data without
+#                a test name
+#   2003-04-10 / Peter Oberparleiter: extended Paul's change so that LCOV
+#                works both with the new and the old gcov-kernel patch
+#   2003-04-10 / Peter Oberparleiter: added $gcov_dir constant in anticipation
+#                of a possible move of the gcov kernel directory to another
+#                file system in a future version of the gcov-kernel patch
+#   2003-04-15 / Paul Larson: make info write to STDERR, not STDOUT
+#   2003-04-15 / Paul Larson: added --remove option
+#   2003-04-30 / Peter Oberparleiter: renamed --reset to --zerocounters
+#                to remove naming ambiguity with --remove
+#   2003-04-30 / Peter Oberparleiter: adjusted help text to include --remove
+#   2003-06-27 / Peter Oberparleiter: implemented --diff
+#   2003-07-03 / Peter Oberparleiter: added line checksum support, added
+#                --no-checksum
+#   2003-12-11 / Laurent Deniel: added --follow option
+#   2004-03-29 / Peter Oberparleiter: modified --diff option to better cope with
+#                ambiguous patch file entries, modified --capture option to use
+#                modprobe before insmod (needed for 2.6)
+#   2004-03-30 / Peter Oberparleiter: added --path option
+#   2004-08-09 / Peter Oberparleiter: added configuration file support
+#
+
+use strict;
+use File::Basename; 
+use Getopt::Long;
+
+
+# Global constants
+our $lcov_version	= "LTP GCOV extension version 1.4";
+our $lcov_url		= "http://ltp.sourceforge.net/coverage/lcov.php";
+
+# Names of the GCOV kernel module
+our @gcovmod = ("gcov-prof", "gcov-proc");
+
+# Directory containing gcov kernel files
+our $gcov_dir = "/proc/gcov";
+
+# The location of the insmod tool
+our $insmod_tool	= "/sbin/insmod";
+
+# The location of the modprobe tool
+our $modprobe_tool	= "/sbin/modprobe";
+
+# The location of the rmmod tool
+our $rmmod_tool		= "/sbin/rmmod";
+
+# Where to create temporary directories
+our $tmp_dir		= "/tmp";
+
+# How to prefix a temporary directory name
+our $tmp_prefix		= "tmpdir";
+
+
+# Prototypes
+sub print_usage(*);
+sub check_options();
+sub userspace_reset();
+sub userspace_capture();
+sub kernel_reset();
+sub kernel_capture();
+sub add_traces();
+sub read_info_file($);
+sub get_info_entry($);
+sub set_info_entry($$$$$;$$);
+sub add_counts($$);
+sub merge_checksums($$$);
+sub combine_info_entries($$$);
+sub combine_info_files($$);
+sub write_info_file(*$);
+sub extract();
+sub remove();
+sub list();
+sub get_common_filename($$);
+sub read_diff($);
+sub diff();
+sub system_no_output($@);
+sub read_config($);
+sub apply_config($);
+
+sub info(@);
+sub unload_module($);
+sub check_and_load_kernel_module();
+sub create_temp_dir();
+sub transform_pattern($);
+
+
+# Global variables & initialization
+our @directory;		# Specifies where to get coverage data from
+our @kernel_directory;	# If set, captures only from specified kernel subdirs
+our @add_tracefile;	# If set, reads in and combines all files in list
+our $list;		# If set, list contents of tracefile
+our $extract;		# If set, extracts parts of tracefile
+our $remove;		# If set, removes parts of tracefile
+our $diff;		# If set, modifies tracefile according to diff
+our $reset;		# If set, reset all coverage data to zero
+our $capture;		# If set, capture data
+our $output_filename;	# Name for file to write coverage data to
+our $test_name = "";	# Test case name
+our $quiet = "";	# If set, suppress information messages
+our $help;		# Help option flag
+our $version;		# Version option flag
+our $nochecksum ="";	# If set, don't calculate a checksum for each line
+our $convert_filenames;	# If set, convert filenames when applying diff
+our $strip;		# If set, strip leading directories when applying diff
+our $need_unload;	# If set, unload gcov kernel module
+our $temp_dir_name;	# Name of temporary directory
+our $cwd = `pwd`;	# Current working directory
+our $to_file;		# If set, indicates that output is written to a file
+our $follow;		# If set, indicates that find shall follow links
+our $diff_path = "";	# Path removed from tracefile when applying diff
+our $config;		# Configuration file contents
+chomp($cwd);
+our $tool_dir = dirname($0);	# Directory where genhtml tool is installed
+
+
+#
+# Code entry point
+#
+
+# Add current working directory if $tool_dir is not already an absolute path
+if (! ($tool_dir =~ /^\/(.*)$/))
+{
+	$tool_dir = "$cwd/$tool_dir";
+}
+
+# Read configuration file if available.  The per-user ~/.lcovrc takes
+# precedence over the system-wide /etc/lcovrc; only one of them is read.
+if (-r $ENV{"HOME"}."/.lcovrc")
+{
+	$config = read_config($ENV{"HOME"}."/.lcovrc");
+}
+elsif (-r "/etc/lcovrc")
+{
+	$config = read_config("/etc/lcovrc");
+}
+
+if ($config)
+{
+	# Copy configuration file values to variables
+	apply_config({
+		"lcov_gcov_dir"		=> \$gcov_dir,
+		"lcov_insmod_tool"	=> \$insmod_tool,
+		"lcov_modprobe_tool"	=> \$modprobe_tool,
+		"lcov_rmmod_tool"	=> \$rmmod_tool,
+		"lcov_tmp_dir"		=> \$tmp_dir});
+}
+
+# Parse command line options.  Short forms like -z or -t resolve through
+# Getopt::Long's automatic abbreviation of the long option names.
+if (!GetOptions("directory|d|di=s" => \@directory,
+		"add-tracefile=s" => \@add_tracefile,
+		"list=s" => \$list,
+		"kernel-directory=s" => \@kernel_directory,
+		"extract=s" => \$extract,
+		"remove=s" => \$remove,
+		"diff=s" => \$diff,
+		"no-checksum" => \$nochecksum,
+		"convert-filenames" => \$convert_filenames,
+		"strip=i" => \$strip,
+		"capture|c" => \$capture,
+		"output-file=s" => \$output_filename,
+		"test-name=s" => \$test_name,
+		"zerocounters" => \$reset,
+		"quiet" => \$quiet,
+		"help" => \$help,
+		"version" => \$version,
+		"follow" => \$follow,
+		"path=s" => \$diff_path
+		))
+{
+	print_usage(*STDERR);
+	exit(1);
+}
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print("$lcov_version\n");
+	exit(0);
+}
+
+# Normalize --path text: drop a trailing slash
+$diff_path =~ s/\/$//;
+
+# Check for valid options
+check_options();
+
+# Only --extract, --remove and --diff allow unnamed parameters
+if (@ARGV && !($extract || $remove || $diff))
+{
+	print_usage(*STDERR);
+	exit(1);
+}
+
+# Check for output filename ("-" means stdout)
+$to_file = ($output_filename && ($output_filename ne "-"));
+
+if ($capture)
+{
+	if (!$to_file)
+	{
+		# Option that tells geninfo to write to stdout
+		$output_filename = "-";
+	}
+}
+
+# Convert the --follow flag into the option string expected downstream.
+# This must run BEFORE the dispatch below: userspace_reset(),
+# userspace_capture() and kernel_capture() all consult $follow, and the
+# script exits at the end of the dispatch — in the original placement this
+# block sat after exit(0) and was unreachable dead code.
+if ($follow)
+{
+	$follow = "--follow";
+}
+
+# Check for requested functionality
+if ($reset)
+{
+	# Differentiate between user space and kernel reset
+	if (@directory)
+	{
+		userspace_reset();
+	}
+	else
+	{
+		kernel_reset();
+	}
+}
+elsif ($capture)
+{
+	# Differentiate between user space and kernel 
+	if (@directory)
+	{
+		userspace_capture();
+	}
+	else
+	{
+		kernel_capture();
+	}
+}
+elsif (@add_tracefile)
+{
+	add_traces();
+}
+elsif ($remove)
+{
+	remove();
+}
+elsif ($extract)
+{
+	extract();
+}
+elsif ($list)
+{
+	list();
+}
+elsif ($diff)
+{
+	if (scalar(@ARGV) != 1)
+	{
+		die("ERROR: option --diff requires one additional argument!\n");
+	}
+	diff();
+}
+
+info("Done.\n");
+exit(0);
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
+sub print_usage(*)
+{
+	# The caller passes a glob (*STDOUT or *STDERR) selecting where the
+	# usage text goes.
+	local *HANDLE = $_[0];
+	my $tool_name = basename($0);
+
+	print(HANDLE <<END_OF_USAGE);
+Usage: $tool_name [OPTIONS]
+
+Use lcov to collect coverage data from either the currently running Linux
+kernel or from a user space application. Specify the --directory option to
+get coverage data for a user space program.
+
+  -h, --help                      Print this help, then exit
+  -v, --version                   Print version number, then exit
+  -q, --quiet                     Do not print progress messages
+  -z, --zerocounters              Reset all execution counts to zero
+  -c, --capture                   Capture coverage data
+  -t, --test-name NAME            Specify test name to be stored with data
+  -o, --output-file FILENAME      Write data to FILENAME instead of stdout
+  -d, --directory DIR             Use .da/.gcda files in DIR instead of kernel
+  -f, --follow                    Follow links when searching .da/.gcda files
+  -k, --kernel-directory KDIR     Capture kernel coverage data only from KDIR
+  -a, --add-tracefile FILE        Add contents of tracefiles
+  -e, --extract FILE PATTERN      Extract files matching PATTERN from FILE
+  -r, --remove FILE PATTERN       Remove files matching PATTERN from FILE
+  -l, --list FILE                 List contents of tracefile FILE
+      --diff FILE DIFF            Transform tracefile FILE according to DIFF
+      --no-checksum               Do not calculate a checksum for each line
+      --convert-filenames         Convert filenames when applying diff
+      --strip DEPTH               Strip initial DEPTH directory levels in diff
+      --path PATH                 Strip PATH from tracefile when applying diff
+
+See $lcov_url for more information about this tool.
+END_OF_USAGE
+	;
+}
+
+
+#
+# check_options()
+#
+# Check for valid combination of command line options. Die on error.
+#
+
+sub check_options()
+{
+	my $i = 0;
+
+	# Count occurrence of mutually exclusive options; exactly one of the
+	# main operation modes must be requested.
+	$reset && $i++;
+	$capture && $i++;
+	@add_tracefile && $i++;
+	$extract && $i++;
+	$remove && $i++;
+	$list && $i++;
+	$diff && $i++;
+	
+	if ($i == 0)
+	{
+		print(STDERR "Need one of the options -z, -c, -e, -a, -r, -l ".
+			     "or --diff\n");
+		print_usage(*STDERR);
+		exit(2);
+	}
+	elsif ($i > 1)
+	{
+		die("ERROR: only one of -z, -c, -e, -a, -r, -l or ".
+		    "--diff allowed!\n");
+	}
+}
+
+
+#
+# userspace_reset()
+#
+# Reset coverage data found in DIRECTORY by deleting all contained .da files.
+#
+# Die on error.
+#
+
+sub userspace_reset()
+{
+	my $current_dir;
+	my @file_list;
+
+	foreach $current_dir (@directory)
+	{
+		info("Deleting all .da and .gcda files in $current_dir and ".
+		     "subdirectories\n");
+		# find errors (e.g. unreadable subdirectories) are discarded
+		# via 2>/dev/null; only successfully listed files are removed.
+		if ($follow)
+		{
+			@file_list =
+				`find $current_dir -follow \\( -name \\*.gcda -o -name \\*.da \\) -type f 2>/dev/null`;
+		}
+		else
+		{
+			@file_list =
+				`find $current_dir \\( -name \\*.gcda -o -name \\*.da \\) -type f 2>/dev/null`;
+		}
+		chomp(@file_list);
+		foreach (@file_list)
+		{
+			unlink($_) or die("ERROR: cannot remove file $_!\n");
+		}
+	}
+}
+
+
+#
+# userspace_capture()
+#
+# Capture coverage data found in DIRECTORY and write it to OUTPUT_FILENAME
+# if specified, otherwise to STDOUT.
+#
+# Die on error.
+#
+
+sub userspace_capture()
+{
+	my @param;
+	my $file_list = join(" ", @directory);
+
+	info("Capturing coverage data from $file_list\n");
+	# Delegate the actual capture to the geninfo companion tool,
+	# forwarding the relevant command line options.
+	@param = ("$tool_dir/geninfo", @directory);
+	if ($output_filename)
+	{
+		@param = (@param, "--output-filename", $output_filename);
+	}
+	if ($test_name)
+	{
+		@param = (@param, "--test-name", $test_name);
+	}
+	if ($follow)
+	{
+		@param = (@param, "--follow");
+	}
+	if ($quiet)
+	{
+		@param = (@param, "--quiet");
+	}
+	if ($nochecksum)
+	{
+		@param = (@param, "--no-checksum");
+	}
+
+	system(@param);
+	# Never returns: propagate geninfo's exit code as our own.
+	exit($? >> 8);
+}
+
+
+#
+# kernel_reset()
+#
+# Reset kernel coverage.
+#
+# Die on error.
+#
+
+sub kernel_reset()
+{
+	local *HANDLE;
+	check_and_load_kernel_module();
+
+	# Writing "0" to the gcov-kernel control file resets the counters.
+	info("Resetting kernel execution counters\n");
+	open(HANDLE, ">$gcov_dir/vmlinux") or
+		die("ERROR: cannot write to $gcov_dir/vmlinux!\n");
+	print(HANDLE "0");
+	close(HANDLE);
+
+	# Unload module if we loaded it in the first place
+	if ($need_unload)
+	{
+		unload_module($need_unload);
+	}
+}
+
+
+#
+# kernel_capture()
+#
+# Capture kernel coverage data and write it to OUTPUT_FILENAME if specified,
+# otherwise stdout.
+#
+
+sub kernel_capture()
+{
+	my @param;
+
+	check_and_load_kernel_module();
+
+	# Make sure the temporary directory is removed upon script termination
+	END
+	{
+		if ($temp_dir_name)
+		{
+			stat($temp_dir_name);
+			if (-r _)
+			{
+				info("Removing temporary directory ".
+				     "$temp_dir_name\n");
+
+				# Remove temporary directory
+				system("rm", "-rf", $temp_dir_name)
+					and warn("WARNING: cannot remove ".
+						 "temporary directory ".
+						 "$temp_dir_name!\n");
+			}
+		}
+	}
+
+	# Get temporary directory
+	$temp_dir_name = create_temp_dir();
+
+	info("Copying kernel data to temporary directory $temp_dir_name\n");
+
+	if (!@kernel_directory)
+	{
+		# Copy files from gcov kernel directory
+		system("cp", "-dr", $gcov_dir, $temp_dir_name)
+			and die("ERROR: cannot copy files from $gcov_dir!\n");
+	}
+	else
+	{
+		# Prefix list of kernel sub-directories with the gcov kernel
+		# directory
+		my @dir_list = map { "$gcov_dir/$_" } @kernel_directory;
+
+		# Copy files from gcov kernel directory.  Each directory must
+		# be passed to cp as a separate argument: list-form system()
+		# bypasses the shell, so a single space-joined string would
+		# reach cp as one (nonexistent) path name when more than one
+		# --kernel-directory is specified.
+		system("cp", "-dr", @dir_list, $temp_dir_name)
+			and die("ERROR: cannot copy files from ".
+				join(" ", @dir_list)."!\n");
+	}
+
+	# Make directories writable
+	system("find", $temp_dir_name, "-type", "d", "-exec", "chmod", "u+w",
+	       "{}", ";")
+		and die("ERROR: cannot modify access rights for ".
+			"$temp_dir_name!\n");
+
+	# Make files writable
+	system("find", $temp_dir_name, "-type", "f", "-exec", "chmod", "u+w",
+	       "{}", ";")
+		and die("ERROR: cannot modify access rights for ".
+			"$temp_dir_name!\n");
+
+	# Capture data by delegating to geninfo on the copied tree
+	info("Capturing coverage data from $temp_dir_name\n");
+	@param = ("$tool_dir/geninfo", $temp_dir_name);
+	if ($output_filename)
+	{
+		@param = (@param, "--output-filename", $output_filename);
+	}
+	if ($test_name)
+	{
+		@param = (@param, "--test-name", $test_name);
+	}
+	if ($follow)
+	{
+		@param = (@param, "--follow");
+	}
+	if ($quiet)
+	{
+		@param = (@param, "--quiet");
+	}
+	if ($nochecksum)
+	{
+		@param = (@param, "--no-checksum");
+	}
+	system(@param) and exit($? >> 8);
+
+
+	# Unload module if we loaded it in the first place
+	if ($need_unload)
+	{
+		unload_module($need_unload);
+	}
+}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
+sub info(@)
+{
+	if (!$quiet)
+	{
+		# Print info string
+		# NOTE(review): the $to_file branch uses print(), so printf-
+		# style % specifiers are only expanded on the STDERR path.
+		# All callers in this file pass plain strings, so the
+		# asymmetry is currently harmless.
+		if ($to_file)
+		{
+			print(@_)
+		}
+		else
+		{
+			# Don't interfer with the .info output to STDOUT
+			printf(STDERR @_);
+		}
+	}
+}
+
+
+#
+# Check if the gcov kernel module is loaded. If it is, exit, if not, try
+# to load it.
+#
+# Die on error.
+#
+
+sub check_and_load_kernel_module()
+{
+	my $module_name;
+
+	# Is it loaded already? ($gcov_dir readable means the gcov-kernel
+	# module has populated /proc)
+	stat("$gcov_dir");
+	if (-r _) { return(); }
+
+	info("Loading required gcov kernel module.\n");
+
+	# Do we have access to the insmod tool?
+	stat($insmod_tool);
+	if (!-x _)
+	{
+		die("ERROR: need insmod tool ($insmod_tool) to access kernel ".
+		    "coverage data!\n");
+	}
+	# Do we have access to the modprobe tool?
+	stat($modprobe_tool);
+	if (!-x _)
+	{
+		die("ERROR: need modprobe tool ($modprobe_tool) to access ".
+		    "kernel coverage data!\n");
+	}
+
+	# Try some possibilities of where the gcov kernel module may be found
+	foreach $module_name (@gcovmod)
+	{
+		# Try to load module from system wide module directory
+		# /lib/modules
+		if (system_no_output(3, $modprobe_tool, $module_name) == 0)
+		{
+			# Succeeded; remember which module to unload later
+			$need_unload = $module_name;
+			return();
+		}
+
+		# Try to load linux 2.5/2.6 module from tool directory
+		if (system_no_output(3, $insmod_tool,
+				      "$tool_dir/$module_name.ko") == 0)
+		{
+			# Succeeded
+			$need_unload = $module_name;
+			return();
+		}
+
+		# Try to load linux 2.4 module from tool directory
+		if (system_no_output(3, $insmod_tool,
+				     "$tool_dir/$module_name.o") == 0)
+		{
+			# Succeeded
+			$need_unload = $module_name;
+			return();
+		}
+	}
+
+	# Hm, loading failed - maybe we aren't root? ($> is the effective UID)
+	if ($> != 0)
+	{
+		die("ERROR: need root access to load kernel module!\n");
+	}
+
+	die("ERROR: cannot load required gcov kernel module!\n");
+}
+
+
+#
+# unload_module()
+#
+# Unload the gcov kernel module.
+#
+
+sub unload_module($)
+{
+	my $module = $_[0];
+
+	info("Unloading kernel module $module\n");
+
+	# Do we have access to the rmmod tool?  A missing tool only warns;
+	# the unload below is still attempted (and will warn again on
+	# failure) so the module may stay loaded.
+	stat($rmmod_tool);
+	if (!-x _)
+	{
+		warn("WARNING: cannot execute rmmod tool at $rmmod_tool - ".
+		     "gcov module still loaded!\n");
+	}
+
+	# Unload gcov kernel module
+	system_no_output(1, $rmmod_tool, $module)
+		and warn("WARNING: cannot unload gcov kernel module ".
+		         "$module!\n");
+}
+
+
+#
+# create_temp_dir()
+#
+# Create a temporary directory and return its path.
+#
+# Die on error.
+#
+
+sub create_temp_dir()
+{
+	my $dirname;
+	my $number = sprintf("%d", rand(1000));
+
+	# Endless loops are evil: at most ~1000 candidate names are tried.
+	# NOTE(review): the stat/mkdir sequence is not atomic and the names
+	# are predictable; in a hostile shared /tmp this is racy.  mkdir
+	# failing on an existing path still dies rather than looping.
+	while ($number++ < 1000)
+	{
+		$dirname = "$tmp_dir/$tmp_prefix$number";
+		stat($dirname);
+		if (-e _) { next; }
+
+		mkdir($dirname)
+			or die("ERROR: cannot create temporary directory ".
+			       "$dirname!\n");
+
+		return($dirname);
+	}
+
+	die("ERROR: cannot create temporary directory in $tmp_dir!\n");
+}
+
+
+#
+# read_info_file(info_filename)
+#
+# Read in the contents of the .info file specified by INFO_FILENAME. Data will
+# be returned as a reference to a hash containing the following mappings:
+#
+# %result: for each filename found in file -> \%data
+#
+# %data: "test"  -> \%testdata
+#        "sum"   -> \%sumcount
+#        "func"  -> \%funcdata
+#        "found" -> $lines_found (number of instrumented lines found in file)
+#	 "hit"   -> $lines_hit (number of executed lines in file)
+#        "check" -> \%checkdata
+#
+# %testdata: name of test affecting this file -> \%testcount
+#
+# %testcount: line number -> execution count for a single test
+# %sumcount : line number -> execution count for all tests
+# %funcdata : line number -> name of function beginning at that line
+# %checkdata: line number -> checksum of source code line
+# 
+# Note that .info file sections referring to the same file and test name
+# will automatically be combined by adding all execution counts.
+#
+# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
+# is compressed using GZIP. If available, GUNZIP will be used to decompress
+# this file.
+#
+# Die on error.
+#
+
+sub read_info_file($)
+{
+	my $tracefile = $_[0];		# Name of tracefile
+	my %result;			# Resulting hash: file -> data
+	my $data;			# Data handle for current entry
+	my $testdata;			#       "             "
+	my $testcount;			#       "             "
+	my $sumcount;			#       "             "
+	my $funcdata;			#       "             "
+	my $checkdata;			#       "             "
+	my $line;			# Current line read from .info file
+	my $testname;			# Current test name
+	my $filename;			# Current filename
+	my $hitcount;			# Count for lines hit
+	my $count;			# Execution count of current line
+	my $negative;			# If set, warn about negative counts
+	my $checksum;			# Checksum of current line
+	local *INFO_HANDLE;		# Filehandle for .info file
+
+	info("Reading tracefile $tracefile\n");
+
+	# Check if file exists and is readable
+	stat($_[0]);
+	if (!(-r _))
+	{
+		die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	# Check if this is really a plain file
+	if (!(-f _))
+	{
+		die("ERROR: not a plain file: $_[0]!\n");
+	}
+
+	# Check for .gz extension
+	if ($_[0] =~ /\.gz$/)
+	{
+		# Check for availability of GZIP tool
+		system_no_output(1, "gunzip" ,"-h")
+			and die("ERROR: gunzip command not available!\n");
+
+		# Check integrity of compressed file
+		system_no_output(1, "gunzip", "-t", $_[0])
+			and die("ERROR: integrity check failed for ".
+				"compressed file $_[0]!\n");
+
+		# Open compressed file
+		open(INFO_HANDLE, "gunzip -c $_[0]|")
+			or die("ERROR: cannot start gunzip to decompress ".
+			       "file $_[0]!\n");
+	}
+	else
+	{
+		# Open decompressed file
+		open(INFO_HANDLE, $_[0])
+			or die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	$testname = "";
+	while (<INFO_HANDLE>)
+	{
+		chomp($_);
+		$line = $_;
+
+		# Switch statement: single-iteration foreach over $line;
+		# each 'last' leaves the "switch".
+		foreach ($line)
+		{
+			/^TN:(\w*(,\w+)?)/ && do
+			{
+				# Test name information found
+				$testname = defined($1) ? $1 : "";
+				last;
+			};
+
+			/^[SK]F:(.*)/ && do
+			{
+				# Filename information found
+				# Retrieve data for new entry
+				$filename = $1;
+
+				$data = $result{$filename};
+				($testdata, $sumcount, $funcdata, $checkdata) =
+					get_info_entry($data);
+
+				# NOTE(review): $testname is initialized to ""
+				# above, so defined() is always true here and
+				# the else branch appears unreachable.
+				if (defined($testname))
+				{
+					$testcount = $testdata->{$testname};
+				}
+				else
+				{
+					my %new_hash;
+					$testcount = \%new_hash;
+				}
+				last;
+			};
+
+			/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
+			{
+				# Fix negative counts
+				$count = $2 < 0 ? 0 : $2;
+				if ($2 < 0)
+				{
+					$negative = 1;
+				}
+				# Execution count found, add to structure
+				# Add summary counts
+				$sumcount->{$1} += $count;
+
+				# Add test-specific counts
+				if (defined($testname))
+				{
+					$testcount->{$1} += $count;
+				}
+
+				# Store line checksum if available
+				if (defined($3))
+				{
+					$checksum = substr($3, 1);
+
+					# Does it match a previous definition
+					if (defined($checkdata->{$1}) &&
+					    ($checkdata->{$1} ne $checksum))
+					{
+						die("ERROR: checksum mismatch ".
+						    "at $filename:$1\n");
+					}
+
+					$checkdata->{$1} = $checksum;
+				}
+				last;
+			};
+
+			/^FN:(\d+),([^,]+)/ && do
+			{
+				# Function data found, add to structure
+				$funcdata->{$1} = $2;
+				last;
+			};
+
+			/^end_of_record/ && do
+			{
+				# Found end of section marker
+				if ($filename)
+				{
+					# Store current section data
+					if (defined($testname))
+					{
+						$testdata->{$testname} =
+							$testcount;
+					}	
+					set_info_entry($data, $testdata,
+						       $sumcount, $funcdata,
+						       $checkdata);
+					$result{$filename} = $data;
+					last;
+				}
+			};
+
+			# default
+			last;
+		}
+	}
+	close(INFO_HANDLE);
+
+	# Calculate lines_found and lines_hit for each file
+	foreach $filename (keys(%result))
+	{
+		$data = $result{$filename};
+
+		($testdata, $sumcount, $funcdata) = get_info_entry($data);
+
+		# Filter out empty test cases
+		if (scalar(keys(%{$sumcount})) == 0)
+		{
+			delete($result{$filename});
+			next;
+		}
+		foreach $testname (keys(%{$testdata}))
+		{
+			if (!defined($testdata->{$testname}) ||
+			    scalar(keys(%{$testdata->{$testname}})) == 0)
+			{
+				delete($testdata->{$testname});
+			}
+		}
+
+		$data->{"found"} = scalar(keys(%{$sumcount}));
+		$hitcount = 0;
+
+		# A line is "hit" when its summed execution count is > 0.
+		foreach (keys(%{$sumcount}))
+		{
+			if ($sumcount->{$_} > 0) { $hitcount++; }
+		}
+
+		$data->{"hit"} = $hitcount;
+
+		$result{$filename} = $data;
+	}
+
+	if (scalar(keys(%result)) == 0)
+	{
+		die("ERROR: no valid records found in tracefile $tracefile\n");
+	}
+	if ($negative)
+	{
+		warn("WARNING: negative counts found in tracefile ".
+		     "$tracefile\n");
+	}
+
+	return(\%result);
+}
+
+
+#
+# get_info_entry(hash_ref)
+#
+# Retrieve data from an entry of the structure generated by read_info_file().
+# Return a list of references to hashes:
+# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
+#  ref, lines found, lines hit)
+#
+
+sub get_info_entry($)
+{
+	# Unpack the six well-known slots of a per-file entry produced by
+	# read_info_file().  Any slot may be undef for a fresh entry.
+	my $testdata_ref = $_[0]->{"test"};
+	my $sumcount_ref = $_[0]->{"sum"};
+	my $funcdata_ref = $_[0]->{"func"};
+	my $checkdata_ref = $_[0]->{"check"};
+	my $lines_found = $_[0]->{"found"};
+	my $lines_hit = $_[0]->{"hit"};
+
+	return ($testdata_ref, $sumcount_ref, $funcdata_ref, $checkdata_ref,
+		$lines_found, $lines_hit);
+}
+
+
+#
+# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
+#                checkdata_ref[,lines_found, lines_hit])
+#
+# Update the hash referenced by HASH_REF with the provided data references.
+#
+
+sub set_info_entry($$$$$;$$)
+{
+	my $data_ref = $_[0];
+
+	# Store the four mandatory hash refs ...
+	$data_ref->{"test"} = $_[1];
+	$data_ref->{"sum"} = $_[2];
+	$data_ref->{"func"} = $_[3];
+	$data_ref->{"check"} = $_[4];
+
+	# ... and the two optional line statistics, only when supplied.
+	if (defined($_[5])) { $data_ref->{"found"} = $_[5]; }
+	if (defined($_[6])) { $data_ref->{"hit"} = $_[6]; }
+}
+
+
+#
+# add_counts(data1_ref, data2_ref)
+#
+# DATA1_REF and DATA2_REF are references to hashes containing a mapping
+#
+#   line number -> execution count
+#
+# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
+# is a reference to a hash containing the combined mapping in which
+# execution counts are added.
+#
+
+sub add_counts($$)
+{
+	my %data1 = %{$_[0]};	# Hash 1
+	my %data2 = %{$_[1]};	# Hash 2
+	my %result;		# Resulting hash
+	my $line;		# Current line iteration scalar
+	my $data1_count;	# Count of line in hash1
+	my $data2_count;	# Count of line in hash2
+	my $found = 0;		# Total number of lines found
+	my $hit = 0;		# Number of lines with a count > 0
+
+	# Pass 1: lines present in data1 (summed with data2 where shared)
+	foreach $line (keys(%data1))
+	{
+		$data1_count = $data1{$line};
+		$data2_count = $data2{$line};
+
+		# Add counts if present in both hashes
+		if (defined($data2_count)) { $data1_count += $data2_count; }
+
+		# Store sum in %result
+		$result{$line} = $data1_count;
+
+		$found++;
+		if ($data1_count > 0) { $hit++; }
+	}
+
+	# Pass 2: add lines unique to data2
+	foreach $line (keys(%data2))
+	{
+		# Skip lines already in data1
+		if (defined($data1{$line})) { next; }
+
+		# Copy count from data2
+		$result{$line} = $data2{$line};
+
+		$found++;
+		if ($result{$line} > 0) { $hit++; }
+	}
+
+	return (\%result, $found, $hit);
+}
+
+
+#
+# merge_checksums(ref1, ref2, filename)
+#
+# REF1 and REF2 are references to hashes containing a mapping
+#
+#   line number -> checksum
+#
+# Merge checksum lists defined in REF1 and REF2 and return reference to
+# resulting hash. Die if a checksum for a line is defined in both hashes
+# but does not match.
+#
+
+sub merge_checksums($$$)
+{
+	my $ref1 = $_[0];
+	my $ref2 = $_[1];
+	my $filename = $_[2];	# Used only for the error message
+	my %result;
+	my $line;
+
+	# Copy ref1 entries, dying on any line whose checksum disagrees
+	# between the two inputs.
+	foreach $line (keys(%{$ref1}))
+	{
+		if (defined($ref2->{$line}) &&
+		    ($ref1->{$line} ne $ref2->{$line}))
+		{
+			die("ERROR: checksum mismatch at $filename:$line\n");
+		}
+		$result{$line} = $ref1->{$line};
+	}
+
+	# Entries unique to ref2 (shared ones are identical at this point)
+	foreach $line (keys(%{$ref2}))
+	{
+		$result{$line} = $ref2->{$line};
+	}
+
+	return \%result;
+}
+
+
+#
+# merge_func_data(ref1, ref2, filename)
+#
+
+# Merge two "line number -> function name" hashes for FILENAME.  When the
+# same function name appears at different line numbers in the two inputs,
+# warn and keep ref1's entry (ref2's conflicting line is ignored).
+sub merge_func_data($$$)
+{
+	my $ref1 = $_[0];
+	my $ref2 = $_[1];
+	my $filename = $_[2];
+	my %result;
+	my %ignore;
+	my $line1;
+	my $line2;
+
+	# Check for mismatch (pairwise comparison, O(n*m))
+	foreach $line1 (keys(%{$ref1}))
+	{
+		foreach $line2 (keys(%{$ref2}))
+		{
+			if (($ref1->{$line1} eq $ref2->{$line2}) &&
+			    ($line1 != $line2))
+			{
+				warn("WARNING: function data mismatch at ".
+				     "$filename:$ref1->{$line1}\n");
+				$ignore{$line2} = 1;
+			}
+		}
+	}
+
+	# Merge
+	foreach (keys(%{$ref1}))
+	{
+		$result{$_} = $ref1->{$_};
+	}
+
+	foreach (keys(%{$ref2}))
+	{
+		if (!$ignore{$_})
+		{
+			$result{$_} = $ref2->{$_};
+		}
+	}
+
+	return \%result;
+}
+
+
+#
+# combine_info_entries(entry_ref1, entry_ref2, filename)
+#
+# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
+# Return reference to resulting hash.
+#
+
+sub combine_info_entries($$$)
+{
+	my $entry1 = $_[0];	# Reference to hash containing first entry
+	my $testdata1;
+	my $sumcount1;
+	my $funcdata1;
+	my $checkdata1;
+
+	my $entry2 = $_[1];	# Reference to hash containing second entry
+	my $testdata2;
+	my $sumcount2;
+	my $funcdata2;
+	my $checkdata2;
+
+	my %result;		# Hash containing combined entry
+	my %result_testdata;
+	my $result_sumcount = {};
+	my $result_funcdata;
+	my $lines_found;
+	my $lines_hit;
+
+	my $testname;
+	my $filename = $_[2];
+
+	# Retrieve data
+	($testdata1, $sumcount1, $funcdata1, $checkdata1) =
+		get_info_entry($entry1);
+	($testdata2, $sumcount2, $funcdata2, $checkdata2) =
+		get_info_entry($entry2);
+
+	# Merge checksums (dies on mismatch)
+	$checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);
+
+	# Combine funcdata
+	$result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename);
+	
+	# Combine testdata
+	foreach $testname (keys(%{$testdata1}))
+	{
+		if (defined($testdata2->{$testname}))
+		{
+			# testname is present in both entries, requires
+			# combination.  The list assignment keeps only the
+			# first return value (the counts hash ref); the
+			# found/hit values from add_counts are discarded here.
+			($result_testdata{$testname}) =
+				add_counts($testdata1->{$testname},
+					   $testdata2->{$testname});
+		}
+		else
+		{
+			# testname only present in entry1, add to result
+			$result_testdata{$testname} = $testdata1->{$testname};
+		}
+
+		# update sum count hash
+		($result_sumcount, $lines_found, $lines_hit) =
+			add_counts($result_sumcount,
+				   $result_testdata{$testname});
+	}
+
+	foreach $testname (keys(%{$testdata2}))
+	{
+		# Skip testnames already covered by previous iteration
+		if (defined($testdata1->{$testname})) { next; }
+
+		# testname only present in entry2, add to result hash
+		$result_testdata{$testname} = $testdata2->{$testname};
+
+		# update sum count hash
+		($result_sumcount, $lines_found, $lines_hit) =
+			add_counts($result_sumcount,
+				   $result_testdata{$testname});
+	}
+	
+	# Calculate resulting sumcount
+
+	# Store result
+	set_info_entry(\%result, \%result_testdata, $result_sumcount,
+		       $result_funcdata, $checkdata1, $lines_found,
+		       $lines_hit);
+
+	return(\%result);
+}
+
+
+#
+# combine_info_files(info_ref1, info_ref2)
+#
+# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
+# reference to resulting hash.
+#
+
+sub combine_info_files($$)
+{
+	# Work on a shallow copy of the first hash; per-file entries are
+	# merged into it and a reference to the copy is returned.
+	my %hash1 = %{$_[0]};
+	my %hash2 = %{$_[1]};
+	my $filename;
+
+	foreach $filename (keys(%hash2))
+	{
+		if ($hash1{$filename})
+		{
+			# Entry already exists in hash1, combine them
+			$hash1{$filename} =
+				combine_info_entries($hash1{$filename},
+						     $hash2{$filename},
+						     $filename);
+		}
+		else
+		{
+			# Entry is unique in both hashes, simply add to
+			# resulting hash
+			$hash1{$filename} = $hash2{$filename};
+		}
+	}
+
+	return(\%hash1);
+}
+
+
+#
+# add_traces()
+#
+
+# Implementation of --add-tracefile: read every tracefile named in
+# @add_tracefile, fold them together, then write the combined data to
+# $output_filename (or stdout when no file was requested).
+sub add_traces()
+{
+	my $total_trace;
+	my $current_trace;
+	my $tracefile;
+	local *INFO_HANDLE;
+
+	info("Combining tracefiles.\n");
+
+	foreach $tracefile (@add_tracefile)
+	{
+		$current_trace = read_info_file($tracefile);
+		if ($total_trace)
+		{
+			$total_trace = combine_info_files($total_trace,
+							  $current_trace);
+		}
+		else
+		{
+			# First tracefile seeds the accumulator
+			$total_trace = $current_trace;
+		}
+	}
+
+	# Write combined data
+	if ($to_file)
+	{
+		info("Writing data to $output_filename\n");
+		open(INFO_HANDLE, ">$output_filename")
+			or die("ERROR: cannot write to $output_filename!\n");
+		write_info_file(*INFO_HANDLE, $total_trace);
+		close(*INFO_HANDLE);
+	}
+	else
+	{
+		write_info_file(*STDOUT, $total_trace);
+	}
+}
+
+
+#
+# write_info_file(filehandle, data)
+#
+
+sub write_info_file(*$)
+{
+	local *INFO_HANDLE = $_[0];
+	my %data = %{$_[1]};
+	my $source_file;
+	my $entry;
+	my $testdata;
+	my $sumcount;
+	my $funcdata;
+	my $checkdata;
+	my $testname;
+	my $line;
+	my $testcount;
+	my $found;
+	my $hit;
+
+	# Emit one .info record per (source file, test name) combination
+	foreach $source_file (keys(%data))
+	{
+		$entry = $data{$source_file};
+		($testdata, $sumcount, $funcdata, $checkdata) =
+			get_info_entry($entry);
+		foreach $testname (keys(%{$testdata}))
+		{
+			$testcount = $testdata->{$testname};
+			$found = 0;
+			$hit   = 0;
+
+			# Record header: test name and source file
+			print(INFO_HANDLE "TN:$testname\n");
+			print(INFO_HANDLE "SF:$source_file\n");
+
+			# FN:<line>,<function name> for each known function
+			foreach $line (sort({$a <=> $b} keys(%{$funcdata})))
+			{
+				print(INFO_HANDLE "FN:$line,".
+				      $funcdata->{$line}."\n");
+			}
+
+			# DA:<line>,<count>[,<checksum>] for each data line;
+			# the checksum field is appended only when one exists
+			# and checksumming was not disabled via $nochecksum
+			foreach $line (sort({$a <=> $b} keys(%{$testcount})))
+			{
+				print(INFO_HANDLE "DA:$line,".
+				      $testcount->{$line}.
+				      (defined($checkdata->{$line}) &&
+				       !$nochecksum ?
+				       ",".$checkdata->{$line} : "")."\n");
+				$found++;
+				if ($testcount->{$line} > 0)
+				{
+					$hit++;
+				}
+
+			}
+			# Per-record summary (lines found/hit) and terminator
+			print(INFO_HANDLE "LF:$found\n");
+			print(INFO_HANDLE "LH:$hit\n");
+			print(INFO_HANDLE "end_of_record\n");
+		}
+	}
+}
+
+
+#
+# transform_pattern(pattern)
+#
+# Transform shell wildcard expression to equivalent PERL regular expression.
+# Return transformed pattern.
+#
+
+sub transform_pattern($)
+{
+	my $wildcard = $_[0];
+
+	# Escape, in a single pass, every character which is special in a
+	# perl regular expression but ordinary in a shell wildcard:
+	# \ / ^ $ ( ) [ ] { } . , | + !
+	$wildcard =~ s/([\\\/\^\$\(\)\[\]\{\}\.\,\|\+\!])/\\$1/g;
+
+	# Transform ? => (.) and * => (.*)
+	$wildcard =~ s/\*/(.*)/g;
+	$wildcard =~ s/\?/(.)/g;
+
+	return $wildcard;
+}
+
+
+#
+# extract()
+#
+
+#
+# Keep only those entries of the tracefile named by the global $extract
+# whose filename matches at least one of the shell wildcard patterns
+# given in @ARGV. Writes the result to $output_filename or STDOUT.
+#
+sub extract()
+{
+	my $data = read_info_file($extract);
+	my $filename;
+	my $keep;
+	my $pattern;
+	my @pattern_list;
+	my $extracted = 0;
+	local *INFO_HANDLE;
+
+	# Need perl regular expressions instead of shell wildcard patterns
+	@pattern_list = map({ transform_pattern($_); } @ARGV);
+
+	# Filter out files which do not match any pattern
+	foreach $filename (sort(keys(%{$data})))
+	{
+		$keep = 0;
+
+		foreach $pattern (@pattern_list)
+		{
+			# Anchored at both ends: the whole filename has to
+			# match the wildcard pattern
+			$keep ||= ($filename =~ (/^$pattern$/));
+		}
+
+		if (!$keep)
+		{
+			delete($data->{$filename});
+		}
+		else
+		{
+			# Fixed: statement separator instead of the comma
+			# operator which only worked by accident
+			info("Extracting $filename\n");
+			$extracted++;
+		}
+	}
+
+	# Write extracted data
+	if ($to_file)
+	{
+		info("Extracted $extracted files\n");
+		info("Writing data to $output_filename\n");
+		open(INFO_HANDLE, ">$output_filename")
+			or die("ERROR: cannot write to $output_filename!\n");
+		write_info_file(*INFO_HANDLE, $data);
+		close(*INFO_HANDLE);
+	}
+	else
+	{
+		write_info_file(*STDOUT, $data);
+	}
+}
+
+
+#
+# remove()
+#
+
+#
+# Delete those entries of the tracefile named by the global $remove
+# whose filename matches at least one of the shell wildcard patterns
+# given in @ARGV. Writes the result to $output_filename or STDOUT.
+#
+sub remove()
+{
+	my $data = read_info_file($remove);
+	my $filename;
+	my $match_found;
+	my $pattern;
+	my @pattern_list;
+	my $removed = 0;
+	local *INFO_HANDLE;
+
+	# Need perl regular expressions instead of shell wildcard patterns
+	@pattern_list = map({ transform_pattern($_); } @ARGV);
+
+	# Filter out files that match the pattern
+	foreach $filename (sort(keys(%{$data})))
+	{
+		$match_found = 0;
+
+		foreach $pattern (@pattern_list)
+		{
+			# Fixed: anchor the pattern at the start as well, so
+			# that - consistent with extract() - a wildcard has
+			# to match the whole filename, not just any suffix
+			$match_found ||= ($filename =~ (/^$pattern$/));
+		}
+
+		if ($match_found)
+		{
+			delete($data->{$filename});
+			# Fixed: statement separator instead of the comma
+			# operator which only worked by accident
+			info("Removing $filename\n");
+			$removed++;
+		}
+	}
+
+	# Write data
+	if ($to_file)
+	{
+		info("Deleted $removed files\n");
+		info("Writing data to $output_filename\n");
+		open(INFO_HANDLE, ">$output_filename")
+			or die("ERROR: cannot write to $output_filename!\n");
+		write_info_file(*INFO_HANDLE, $data);
+		close(*INFO_HANDLE);
+	}
+	else
+	{
+		write_info_file(*STDOUT, $data);
+	}
+}
+
+
+#
+# list()
+#
+
+#
+# Print a per-file summary (lines hit / lines found) of the tracefile
+# named by the global $list.
+#
+sub list()
+{
+	my $data = read_info_file($list);
+	my $filename;
+	my $found;
+	my $hit;
+	my $entry;
+
+	info("Listing contents of $list:\n");
+
+	# List all files along with their summary coverage counts
+	foreach $filename (sort(keys(%{$data})))
+	{
+		$entry = $data->{$filename};
+		(undef, undef, undef, undef, $found, $hit) =
+			get_info_entry($entry);
+		# Fixed: use print instead of printf - user data was passed
+		# as the format string, so a filename containing '%' would
+		# have been misinterpreted as a format directive
+		print("$filename: $hit of $found lines hit\n");
+	}
+}
+
+
+#
+# get_common_filename(filename1, filename2)
+#
+# Check for filename components which are common to FILENAME1 and FILENAME2.
+# Upon success, return
+#
+#   (common, path1, path2)
+#
+#  or 'undef' in case there are no such parts.
+#
+
+sub get_common_filename($$)
+{
+	my @list1 = split("/", $_[0]);
+	my @list2 = split("/", $_[1]);
+	my @common;
+
+	# Compare components in reverse order, i.e. starting with the
+	# filename itself, collecting the shared trailing part
+	while (@list1 && @list2 && ($list1[$#list1] eq $list2[$#list2]))
+	{
+		unshift(@common, pop(@list1));
+		pop(@list2);
+	}
+
+	# No shared trailing components at all?
+	if (!@common)
+	{
+		return undef;
+	}
+
+	# Return the common suffix plus each input's distinct prefix
+	return (join("/", @common), join("/", @list1), join("/", @list2));
+}
+
+
+#
+# strip_directories($path, $depth)
+#
+# Remove DEPTH leading directory levels from PATH.
+#
+
+sub strip_directories($$)
+{
+	my ($path, $depth) = @_;
+
+	# Nothing to strip when no positive depth was given
+	return $path if (!defined($depth) || ($depth < 1));
+
+	# Remove one leading path component per iteration
+	foreach (1 .. $depth)
+	{
+		$path =~ s/^[^\/]*\/+(.*)$/$1/;
+	}
+	return $path;
+}
+
+
+#
+# read_diff(filename)
+#
+# Read diff output from FILENAME to memory. The diff file has to follow the
+# format generated by 'diff -u'. Returns a list of hash references:
+#
+#   (mapping, path mapping)
+#
+#   mapping:   filename -> reference to line hash
+#   line hash: line number in new file -> corresponding line number in old file
+#
+#   path mapping:  filename -> old filename
+#
+# Die in case of error.
+#
+
+sub read_diff($)
+{
+	my $diff_file = $_[0];	# Name of diff file
+	my %diff;		# Resulting mapping filename -> line hash
+	my %paths;		# Resulting mapping old path  -> new path
+	my $mapping;		# Reference to current line hash
+	my $line;		# Contents of current line
+	my $num_old;		# Current line number in old file
+	my $num_new;		# Current line number in new file
+	my $file_old;		# Name of old file in diff section
+	my $file_new;		# Name of new file in diff section
+	my $filename;		# Name of common filename of diff section
+	my $in_block = 0;	# Non-zero while we are inside a diff block
+	local *HANDLE;		# File handle for reading the diff file
+
+	info("Reading diff $diff_file\n");
+
+	# Check if file exists and is readable
+	stat($diff_file);
+	if (!(-r _))
+	{
+		die("ERROR: cannot read file $diff_file!\n");
+	}
+
+	# Check if this is really a plain file
+	if (!(-f _))
+	{
+		die("ERROR: not a plain file: $diff_file!\n");
+	}
+
+	# Check for .gz extension
+	if ($diff_file =~ /\.gz$/)
+	{
+		# Check for availability of GZIP tool
+		system_no_output(1, "gunzip", "-h")
+			and die("ERROR: gunzip command not available!\n")
+
+		# Check integrity of compressed file
+		system_no_output(1, "gunzip", "-t", $diff_file)
+			and die("ERROR: integrity check failed for ".
+				"compressed file $diff_file!\n");
+
+		# Open compressed file
+		open(HANDLE, "gunzip -c $diff_file|")
+			or die("ERROR: cannot start gunzip to decompress ".
+			       "file $_[0]!\n");
+	}
+	else
+	{
+		# Open decompressed file
+		open(HANDLE, $diff_file)
+			or die("ERROR: cannot read file $_[0]!\n");
+	}
+
+	# Parse diff file line by line
+	while (<HANDLE>)
+	{
+		chomp($_);
+		$line = $_;
+
+		# foreach over the single-element list ($line) topicalizes it
+		# as $_ so each case below is a bare regex match; every case
+		# leaves the dispatch via 'last'
+		foreach ($line)
+		{
+			# Filename of old file:
+			# --- <filename> <date>
+			/^--- (\S+)/ && do
+			{
+				$file_old = strip_directories($1, $strip);
+				last;
+			};
+			# Filename of new file:
+			# +++ <filename> <date>
+			/^\+\+\+ (\S+)/ && do
+			{
+				# Add last file to resulting hash
+				if ($filename)
+				{
+					my %new_hash;
+					$diff{$filename} = $mapping;
+					$mapping = \%new_hash;
+				}
+				$file_new = strip_directories($1, $strip);
+				$filename = $file_old;
+				$paths{$filename} = $file_new;
+				$num_old = 1;
+				$num_new = 1;
+				last;
+			};
+			# Start of diff block:
+			# @@ -old_start,old_num, +new_start,new_num @@
+			# NOTE(review): this regex requires the ",count" form
+			# on both sides; abbreviated hunk headers such as
+			# "@@ -1 +1 @@" would not be recognized - confirm all
+			# expected inputs use the full form
+			/^\@\@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+\@\@$/ && do
+			{
+			$in_block = 1;
+			# Map all unchanged lines preceding this hunk 1:1
+			while ($num_old < $1)
+			{
+				$mapping->{$num_new} = $num_old;
+				$num_old++;
+				$num_new++;
+			}
+			last;
+			};
+			# Unchanged line
+			# <line starts with blank>
+			/^ / && do
+			{
+				if ($in_block == 0)
+				{
+					last;
+				}
+				$mapping->{$num_new} = $num_old;
+				$num_old++;
+				$num_new++;
+				last;
+			};
+			# Line as seen in old file
+			# <line starts with '-'>
+			/^-/ && do
+			{
+				if ($in_block == 0)
+				{
+					last;
+				}
+				$num_old++;
+				last;
+			};
+			# Line as seen in new file
+			# <line starts with '+'>
+			/^\+/ && do
+			{
+				if ($in_block == 0)
+				{
+					last;
+				}
+				$num_new++;
+				last;
+			};
+			# Empty line
+			/^$/ && do
+			{
+				if ($in_block == 0)
+				{
+					last;
+				}
+				$mapping->{$num_new} = $num_old;
+				$num_old++;
+				$num_new++;
+				last;
+			};
+		}
+	}
+
+	close(HANDLE);
+
+	# Add final diff file section to resulting hash
+	if ($filename)
+	{
+		$diff{$filename} = $mapping;
+	}
+
+	if (!%diff)
+	{
+		die("ERROR: no valid diff data found in $diff_file!\n".
+		    "Make sure to use 'diff -u' when generating the diff ".
+		    "file.\n");
+	}
+	return (\%diff, \%paths);
+}
+
+
+#
+# apply_diff($count_data, $line_hash)
+#
+# Transform count data using a mapping of lines:
+#
+#   $count_data: reference to hash: line number -> data
+#   $line_hash:  reference to hash: line number new -> line number old
+#
+# Return a reference to transformed count data.
+#
+
+sub apply_diff($$)
+{
+	my $count_data = $_[0];	# Reference to data hash: line -> hash
+	my $line_hash = $_[1];	# Reference to line hash: new line -> old line
+	my %result;		# Resulting hash
+	my $last_new = 0;	# Last new line number found in line hash
+	my $last_old = 0;	# Last old line number found in line hash
+
+	# Iterate all new line numbers found in the diff
+	foreach (sort({$a <=> $b} keys(%{$line_hash})))
+	{
+		$last_new = $_;
+		$last_old = $line_hash->{$last_new};
+
+		# Is there data associated with the corresponding old line?
+		if (defined($count_data->{$line_hash->{$_}}))
+		{
+			# Copy data to new hash with a new line number
+			$result{$_} = $count_data->{$line_hash->{$_}};
+		}
+	}
+	# Transform all other lines which come after the last diff entry
+	# by shifting each one with the net line-number offset accumulated
+	# over the whole diff ($last_new - $last_old)
+	foreach (sort({$a <=> $b} keys(%{$count_data})))
+	{
+		if ($_ < $last_old)
+		{
+			# Skip lines which were covered by line hash
+			next;
+		}
+		# Copy data to new hash with an offset
+		# (a line equal to $last_old is handled by both loops, but
+		# both write the same value, so the overlap is harmless)
+		$result{$_ + ($last_new - $last_old)} = $count_data->{$_};
+	}
+
+	return \%result;
+}
+
+
+#
+# get_line_hash($filename, $diff_data, $path_data)
+#
+# Find the line hash in DIFF_DATA which matches FILENAME. On success, return
+# the list (line hash, old path, new path), or undef in case of no match. Die
+# if more than one line hash in DIFF_DATA matches.
+#
+
+sub get_line_hash($$$)
+{
+	my $filename = $_[0];
+	my $diff_data = $_[1];
+	my $path_data = $_[2];
+	my $conversion;
+	my $old_path;
+	my $new_path;
+	my $diff_name;
+	my $common;
+	my $old_depth;
+	my $new_depth;
+
+	# Find the diff entry whose name matches the trailing part of
+	# FILENAME underneath the global $diff_path prefix
+	foreach (keys(%{$diff_data}))
+	{
+		# Try to match diff filename with filename
+		if ($filename =~ /^\Q$diff_path\E\/$_$/)
+		{
+			if ($diff_name)
+			{
+				# Two files match, choose the more specific one
+				# (the one with more path components)
+				$old_depth = ($diff_name =~ tr/\///);
+				# bare tr/// counts slashes in $_, i.e. in
+				# the current diff entry name
+				$new_depth = (tr/\///);
+				if ($old_depth == $new_depth)
+				{
+					die("ERROR: diff file contains ".
+					    "ambiguous entries for ".
+					    "$filename\n");
+				}
+				elsif ($new_depth > $old_depth)
+				{
+					$diff_name = $_;
+				}
+			}
+			else
+			{
+				$diff_name = $_;
+			}
+		};
+	}
+	if ($diff_name)
+	{
+		# Get converted path: derive old/new path prefixes from the
+		# common trailing components of the two names
+		if ($filename =~ /^(.*)$diff_name$/)
+		{
+			($common, $old_path, $new_path) =
+				get_common_filename($filename,
+					$1.$path_data->{$diff_name});
+		}
+		return ($diff_data->{$diff_name}, $old_path, $new_path);
+	}
+	else
+	{
+		return undef;
+	}
+}
+
+
+#
+# convert_paths(trace_data, path_conversion_data)
+#
+# Rename all paths in TRACE_DATA which show up in PATH_CONVERSION_DATA.
+#
+
+sub convert_paths($$)
+{
+	my $trace_data = $_[0];
+	my $path_conversion_data = $_[1];
+	my $filename;
+	my $new_path;
+
+	if (scalar(keys(%{$path_conversion_data})) == 0)
+	{
+		info("No path conversion data available.\n");
+		return;
+	}
+
+	# Expand path conversion list: for each old->new mapping, also add
+	# mappings for every pair of parent directories (stripping one
+	# trailing component at a time) so prefix matching below can work
+	# at any depth
+	foreach $filename (keys(%{$path_conversion_data}))
+	{
+		$new_path = $path_conversion_data->{$filename};
+		while (($filename =~ s/^(.*)\/[^\/]+$/$1/) &&
+		       ($new_path =~ s/^(.*)\/[^\/]+$/$1/) &&
+		       ($filename ne $new_path))
+		{
+			$path_conversion_data->{$filename} = $new_path;
+		}
+	}
+
+	# Adjust paths
+	FILENAME: foreach $filename (keys(%{$trace_data}))
+	{
+		# Find a path in our conversion table that matches, starting
+		# with the longest path
+		foreach (sort({length($b) <=> length($a)}
+			      keys(%{$path_conversion_data})))
+		{
+			# Is this path a prefix of our filename?
+			if (!($filename =~ /^$_(.*)$/))
+			{
+				next;
+			}
+			$new_path = $path_conversion_data->{$_}.$1;
+
+			# Make sure not to overwrite an existing entry under
+			# that path name
+			if ($trace_data->{$new_path})
+			{
+				# Need to combine entries
+				$trace_data->{$new_path} =
+					combine_info_entries(
+						$trace_data->{$filename},
+						$trace_data->{$new_path},
+						$filename);
+			}
+			else
+			{
+				# Simply rename entry
+				$trace_data->{$new_path} =
+					$trace_data->{$filename};
+			}
+			delete($trace_data->{$filename});
+			next FILENAME;
+		}
+		info("No conversion available for filename $filename\n");
+	}
+}
+
+
+#
+# diff()
+#
+
+sub diff()
+{
+	# Convert the tracefile named by the global $diff according to the
+	# unified diff read from $ARGV[0]: line numbers are remapped via
+	# apply_diff(), renamed files are tracked, and converted testnames
+	# get a ",diff" suffix. Output goes to $output_filename or STDOUT.
+	my $trace_data = read_info_file($diff);
+	my $diff_data;
+	my $path_data;
+	my $old_path;
+	my $new_path;
+	my %path_conversion_data;
+	my $filename;
+	my $line_hash;
+	my $new_name;	# NOTE(review): appears unused in this sub
+	my $entry;
+	my $testdata;
+	my $testname;
+	my $sumcount;
+	my $funcdata;
+	my $checkdata;
+	my $found;
+	my $hit;
+	my $converted = 0;
+	my $unchanged = 0;
+	local *INFO_HANDLE;
+
+	($diff_data, $path_data) = read_diff($ARGV[0]);
+
+        foreach $filename (sort(keys(%{$trace_data})))
+        {
+		# Find a diff section corresponding to this file
+		($line_hash, $old_path, $new_path) =
+			get_line_hash($filename, $diff_data, $path_data);
+		if (!$line_hash)
+		{
+			# There's no diff section for this file
+			$unchanged++;
+			next;
+		}
+		$converted++;
+		if ($old_path && $new_path && ($old_path ne $new_path))
+		{
+			$path_conversion_data{$old_path} = $new_path;
+		}
+		# Check for deleted files
+		if (scalar(keys(%{$line_hash})) == 0)
+		{
+			info("Removing $filename\n");
+			delete($trace_data->{$filename});
+			next;
+		}
+		info("Converting $filename\n");
+		$entry = $trace_data->{$filename};
+		($testdata, $sumcount, $funcdata, $checkdata) =
+			get_info_entry($entry);
+		# Convert test data
+		foreach $testname (keys(%{$testdata}))
+		{
+			$testdata->{$testname} =
+				apply_diff($testdata->{$testname}, $line_hash);
+			# Remove empty sets of test data
+			if (scalar(keys(%{$testdata->{$testname}})) == 0)
+			{
+				delete($testdata->{$testname});
+			}
+		}
+		# Rename test data to indicate conversion
+		foreach $testname (keys(%{$testdata}))
+		{
+			# Skip testnames which already contain an extension
+			if ($testname =~ /,[^,]+$/)
+			{
+				next;
+			}
+			# Check for name conflict
+			if (defined($testdata->{$testname.",diff"}))
+			{
+				# Add counts
+				($testdata->{$testname}) = add_counts(
+					$testdata->{$testname},
+					$testdata->{$testname.",diff"});
+				delete($testdata->{$testname.",diff"});
+			}
+			$testdata->{$testname.",diff"} = $testdata->{$testname};
+			delete($testdata->{$testname});
+		}
+		# Convert summary of test data
+		$sumcount = apply_diff($sumcount, $line_hash);
+		# Convert function data
+		$funcdata = apply_diff($funcdata, $line_hash);
+		# Convert checksum data
+		$checkdata = apply_diff($checkdata, $line_hash);
+		# Update found/hit numbers
+		$found = 0;
+		$hit = 0;
+		foreach (keys(%{$sumcount}))
+		{
+			$found++;
+			if ($sumcount->{$_} > 0)
+			{
+				$hit++;
+			}
+		}
+		if ($found > 0)
+		{
+			# Store converted entry
+			set_info_entry($entry, $testdata, $sumcount, $funcdata,
+				       $checkdata, $found, $hit);
+		}
+		else
+		{
+			# Remove empty data set
+			delete($trace_data->{$filename});
+		}
+        }
+
+	# Convert filenames as well if requested
+	if ($convert_filenames)
+	{
+		convert_paths($trace_data, \%path_conversion_data);
+	}
+
+	info("$converted entr".($converted != 1 ? "ies" : "y")." converted, ".
+	     "$unchanged entr".($unchanged != 1 ? "ies" : "y")." left ".
+	     "unchanged.\n");
+
+	# Write data
+	if ($to_file)
+	{
+		info("Writing data to $output_filename\n");
+		open(INFO_HANDLE, ">$output_filename")
+			or die("ERROR: cannot write to $output_filename!\n");
+		write_info_file(*INFO_HANDLE, $trace_data);
+		close(*INFO_HANDLE);
+	}
+	else
+	{
+		write_info_file(*STDOUT, $trace_data);
+	}
+}
+
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing depending on
+# the value of MODE:
+#
+#   MODE & 1: suppress STDOUT
+#   MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise.
+#
+
+sub system_no_output($@)
+{
+	my $mode = shift;
+	my $result;
+	local *OLD_STDERR;
+	local *OLD_STDOUT;
+
+	# Save old stdout and stderr handles (">>&" duplicates a handle)
+	($mode & 1) && open(OLD_STDOUT, ">>&STDOUT");
+	($mode & 2) && open(OLD_STDERR, ">>&STDERR");
+
+	# Redirect to /dev/null
+	($mode & 1) && open(STDOUT, ">/dev/null");
+	($mode & 2) && open(STDERR, ">/dev/null");
+ 
+	# Run the command; $? holds the raw wait status afterwards
+	# (0 on success), which is what gets returned
+	system(@_);
+	$result = $?;
+
+	# Close redirected handles
+	($mode & 1) && close(STDOUT);
+	($mode & 2) && close(STDERR);
+
+	# Restore old handles
+	# NOTE(review): failures while saving/restoring the handles are
+	# silently ignored
+	($mode & 1) && open(STDOUT, ">>&OLD_STDOUT");
+	($mode & 2) && open(STDERR, ">>&OLD_STDERR");
+ 
+	return $result;
+}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found.
+#
+
+sub read_config($)
+{
+	my $filename = $_[0];
+	my %result;
+	local *HANDLE;
+
+	if (!open(HANDLE, "<$filename"))
+	{
+		warn("WARNING: cannot read configuration file $filename\n");
+		return undef;
+	}
+	while (my $statement = <HANDLE>)
+	{
+		chomp($statement);
+		# Strip comments and surrounding whitespace
+		$statement =~ s/#.*//;
+		$statement =~ s/^\s+//;
+		$statement =~ s/\s+$//;
+		# Ignore lines that are empty after stripping
+		next if ($statement eq "");
+		my ($key, $value) = split(/\s*=\s*/, $statement, 2);
+		if (defined($key) && defined($value))
+		{
+			$result{$key} = $value;
+		}
+		else
+		{
+			warn("WARNING: malformed statement in line $. ".
+			     "of configuration file $filename\n");
+		}
+	}
+	close(HANDLE);
+	return \%result;
+}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+#   key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hash CONFIG contains a value for
+# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword. 
+#
+
+sub apply_config($)
+{
+	my $ref = $_[0];
+	my $keyword;
+
+	# Overwrite each referenced variable for which the global
+	# configuration hash $config provides a value
+	foreach $keyword (keys(%{$ref}))
+	{
+		if (defined($config->{$keyword}))
+		{
+			${$ref->{$keyword}} = $config->{$keyword};
+		}
+	}
+}