X-Git-Url: http://www.privoxy.org/gitweb/?p=privoxy.git;a=blobdiff_plain;f=tools%2Fprivoxy-regression-test.pl;h=a083ff0c8a3460fb98de29e2805eb42e9df37d7d;hp=d7f5eed56401d00b16ef97eea2e589e91827f00b;hb=777f23fd554b0811d16bc2323ced6fe9dc0cff72;hpb=7206aba26d5e90b6d0e30b516969657c45c06386

diff --git a/tools/privoxy-regression-test.pl b/tools/privoxy-regression-test.pl
index d7f5eed5..a083ff0c 100755
--- a/tools/privoxy-regression-test.pl
+++ b/tools/privoxy-regression-test.pl
@@ -7,7 +7,7 @@
 # A regression test "framework" for Privoxy. For documentation see:
 # perldoc privoxy-regression-test.pl
 #
-# $Id: privoxy-regression-test.pl,v 1.84 2013/01/06 18:14:44 fabiankeil Exp $
+# $Id: privoxy-regression-test.pl,v 1.92 2013/03/20 11:30:45 fabiankeil Exp $
 #
 # Wish list:
 #
@@ -19,7 +19,7 @@
 # - Document magic Expect Header values
 # - Internal fuzz support?
 #
-# Copyright (c) 2007-2011 Fabian Keil
+# Copyright (c) 2007-2013 Fabian Keil
 #
 # Permission to use, copy, modify, and distribute this software for any
 # purpose with or without fee is hereby granted, provided that the above
@@ -40,7 +40,7 @@ use strict;
 use Getopt::Long;
 
 use constant {
-    PRT_VERSION => 'Privoxy-Regression-Test 0.5',
+    PRT_VERSION => 'Privoxy-Regression-Test 0.6',
 
     CURL => 'curl',
 
@@ -66,8 +66,6 @@ use constant {
     DEBUG_LEVEL_VERBOSE_SUCCESS => 0,
     DEBUG_LEVEL_STATUS => 1,
 
-    VERBOSE_TEST_DESCRIPTION => 1,
-
     # Internal use, don't modify
     # Available debug bits:
     LL_SOFT_ERROR => 1,
@@ -92,7 +90,6 @@ sub init_our_variables () {
     our $leading_log_time = LEADING_LOG_TIME;
     our $leading_log_date = LEADING_LOG_DATE;
     our $privoxy_cgi_url = PRIVOXY_CGI_URL;
-    our $verbose_test_description = VERBOSE_TEST_DESCRIPTION;
     our $log_level = get_default_log_level();
 }
 
@@ -479,6 +476,33 @@ sub enlist_new_test ($$$$$$) {
       "Regression test " . $number . " (section:" . $si . "):");
 }
 
+sub mark_matching_tests_for_skipping($) {
+    my $overwrite_condition = shift;
+
+    our @regression_tests;
+
+    for (my $s = 0; $s < @regression_tests; $s++) {
+
+        my $r = 0;
+
+        while (defined $regression_tests[$s][$r]) {
+
+            if ($regression_tests[$s][$r]{'data'} eq $overwrite_condition) {
+                my $message = sprintf("Marking test %s for ignoring. Overwrite condition: %s.",
+                                      $regression_tests[$s][$r]{'number'}, $overwrite_condition);
+
+                l(LL_FILE_LOADING, $message);
+
+                # XXX: Should eventually get its own key so get_skip_reason()
+                #      can tell about the overwrite condition.
+                $regression_tests[$s][$r]{'ignore'} = 1;
+            }
+            $r++;
+        }
+    }
+}
+
+
 # XXX: Shares a lot of code with load_regression_tests_from_file()
 #      that should be factored out.
 sub load_action_files ($) {
@@ -545,7 +569,16 @@ sub load_action_files ($) {
                   "action parameters are currently unsupported.");
             }
         }
-
+
+        if ($token eq 'overwrite condition') {
+
+            l(LL_FILE_LOADING, "Detected overwrite condition: " . $value);
+            # We can only skip matching tests that have already
+            # been loaded but that is exactly what we want anyway.
+            mark_matching_tests_for_skipping($value);
+            next;
+        }
+
         if ($si == -1 || $ri == -1) {
             # No beginning of a test detected yet,
             # so we don't care about any other test
@@ -683,12 +716,12 @@ sub execute_regression_tests () {
        #      from different sections isn't possible.
        #      Is this worth changing the layout?
        fisher_yates_shuffle(\@regression_tests);
-        for (my $s = 0; $s < @regression_tests; $s++) {
+        for (my $s = 0; $s < @regression_tests; $s++) {
             fisher_yates_shuffle($regression_tests[$s]);
         }
     }
 
-    for (my $s = 0; $s < @regression_tests; $s++) {
+    for (my $s = 0; $s < @regression_tests; $s++) {
 
         my $r = 0;
 
@@ -1509,86 +1542,81 @@ sub log_message ($) {
 
 sub log_result ($$) {
 
-    our $verbose_test_description;
     our $filtered_request;
 
     my $test = shift;
     my $result = shift;
     my $number = shift;
 
-    my $message = '';
+    my $message = sprintf("%s for test %d",
+                          interpret_result($result),
+                          $test->{'number'});
 
-    $message .= interpret_result($result);
-    $message .= " for test ";
-    $message .= $number;
-    $message .= '/';
-    $message .= $test->{'number'};
-    $message .= '/';
-    $message .= $test->{'section-id'};
-    $message .= '/';
-    $message .= $test->{'regression-test-id'};
-    $message .= '.';
+    if (cli_option_is_set('verbose')) {
+        $message .= sprintf(" (%d/%d/%d)", $number,
+                            $test->{'section-id'},
+                            $test->{'regression-test-id'});
+    }
 
-    if ($verbose_test_description) {
+    $message .= '. ';
 
-        if ($test->{'type'} == CLIENT_HEADER_TEST) {
+    if ($test->{'type'} == CLIENT_HEADER_TEST) {
 
-            $message .= ' Header ';
-            $message .= quote($test->{'data'});
-            $message .= ' and tag ';
-            $message .= quote($test->{'tag'});
+        $message .= 'Header ';
+        $message .= quote($test->{'data'});
+        $message .= ' and tag ';
+        $message .= quote($test->{'tag'});
 
-        } elsif ($test->{'type'} == SERVER_HEADER_TEST) {
+    } elsif ($test->{'type'} == SERVER_HEADER_TEST) {
 
-            $message .= ' Request Header ';
-            $message .= quote($test->{'data'});
-            $message .= ' and tag ';
-            $message .= quote($test->{'tag'});
+        $message .= 'Request Header ';
+        $message .= quote($test->{'data'});
+        $message .= ' and tag ';
+        $message .= quote($test->{'tag'});
 
-        } elsif ($test->{'type'} == DUMB_FETCH_TEST) {
+    } elsif ($test->{'type'} == DUMB_FETCH_TEST) {
 
-            $message .= ' URL ';
-            $message .= quote($test->{'data'});
-            $message .= ' and expected status code ';
-            $message .= quote($test->{'expected-status-code'});
+        $message .= 'URL ';
+        $message .= quote($test->{'data'});
+        $message .= ' and expected status code ';
+        $message .= quote($test->{'expected-status-code'});
 
-        } elsif ($test->{'type'} == TRUSTED_CGI_REQUEST) {
+    } elsif ($test->{'type'} == TRUSTED_CGI_REQUEST) {
 
-            $message .= ' CGI URL ';
-            $message .= quote($test->{'data'});
-            $message .= ' and expected status code ';
-            $message .= quote($test->{'expected-status-code'});
+        $message .= 'CGI URL ';
+        $message .= quote($test->{'data'});
+        $message .= ' and expected status code ';
+        $message .= quote($test->{'expected-status-code'});
 
-        } elsif ($test->{'type'} == METHOD_TEST) {
+    } elsif ($test->{'type'} == METHOD_TEST) {
 
-            $message .= ' HTTP method ';
-            $message .= quote($test->{'data'});
-            $message .= ' and expected status code ';
-            $message .= quote($test->{'expected-status-code'});
+        $message .= 'HTTP method ';
+        $message .= quote($test->{'data'});
+        $message .= ' and expected status code ';
+        $message .= quote($test->{'expected-status-code'});
 
-        } elsif ($test->{'type'} == BLOCK_TEST) {
+    } elsif ($test->{'type'} == BLOCK_TEST) {
 
-            $message .= ' Supposedly-blocked URL: ';
-            $message .= quote($test->{'data'});
+        $message .= 'Supposedly-blocked URL: ';
+        $message .= quote($test->{'data'});
 
-        } elsif ($test->{'type'} == STICKY_ACTIONS_TEST) {
+    } elsif ($test->{'type'} == STICKY_ACTIONS_TEST) {
 
-            $message .= ' Sticky Actions: ';
-            $message .= quote($test->{'sticky-actions'});
-            $message .= ' and URL: ';
-            $message .= quote($test->{'data'});
+        $message .= 'Sticky Actions: ';
+        $message .= quote($test->{'sticky-actions'});
+        $message .= ' and URL: ';
+        $message .= quote($test->{'data'});
 
-        } elsif ($test->{'type'} == REDIRECT_TEST) {
+    } elsif ($test->{'type'} == REDIRECT_TEST) {
 
-            $message .= ' Redirected URL: ';
-            $message .= quote($test->{'data'});
-            $message .= ' and redirect destination: ';
-            $message .= quote($test->{'redirect destination'});
+        $message .= 'Redirected URL: ';
+        $message .= quote($test->{'data'});
+        $message .= ' and redirect destination: ';
+        $message .= quote($test->{'redirect destination'});
 
-        } else {
+    } else {
 
-            die "Incomplete support for test type " . $test->{'type'} . " detected.";
-        }
+        die "Incomplete support for test type " . $test->{'type'} . " detected.";
     }
 
     log_message($message) if (!$result or cli_option_is_set('verbose'));
@@ -1881,6 +1909,28 @@ The difference between a skipped test and a removed
 one is that removing a test affects the numbers of the following tests,
 while a skipped test is still loaded and thus keeps the test numbers unchanged.
 
+Sometimes user modifications intentionally conflict with tests in the
+default configuration and thus cause test failures. Adding the Ignore
+directive to the failing tests works but is inconvenient as the directive
+is likely to get lost with the next update.
+
+Overwrite conditions are an alternative and can be added in any action
+file as long as they come after the test that is expected to fail.
+They cause all previous tests matching the condition to be skipped.
+
+It is recommended to put the overwrite condition below the custom Privoxy
+section that causes the expected test failure and before the custom test
+that verifies the now-expected behaviour. Example:
+
+# The following section is expected to overwrite a section in
+# default.action, whose effect is tested. Thus also disable the
+# test that is now expected to fail and add a new one.
+#
+{+block{Facebook makes Firefox even more unstable. Do not want.}}
+# Overwrite condition = http://apps.facebook.com/onthefarm/track.php?creative=&cat=friendvisit&subcat=weeds&key=a789a971dc687bee4c20c044834fabdd&next=index.php%3Fref%3Dnotif%26visitId%3D898835505
+# Blocked URL = http://apps.facebook.com/
+.facebook./
+
 =head1 TEST LEVELS
 
 All tests have test levels to let the user