From b3d71b02d0ad7277e2c18e0d2073722871f1552f Mon Sep 17 00:00:00 2001
From: Marina Polyakova <m.polyakova@postgrespro.ru>
Date: Mon, 21 May 2018 15:26:06 +0300
Subject: [PATCH v9] Pgbench errors and serialization/deadlock retries

A client's run is aborted only in case of a serious error, for example, if the
connection with the backend is lost. Otherwise, if the execution of an SQL or
meta command fails, the client's run continues normally until the end of the
current script execution (it is assumed that one transaction script contains
only one transaction).

Transactions with serialization or deadlock failures are rolled back and
repeated until they complete successfully or reach the maximum number of tries
(specified by the --max-tries option) or the maximum time of tries (specified
by the --latency-limit option). These options can be combined; if neither is
used, failed transactions are not retried at all. If the last run of a
transaction fails, that transaction is reported as failed, and the client
variables are reset to the values they had before its first run.
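
For reference, the retry decision implemented by canRetry() in pgbench.c boils
down to roughly the following sketch (the function and parameter names here
are illustrative only, not the patch's exact code):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Simplified sketch of the retry decision (cf. canRetry() in pgbench.c).
     * max_tries and latency_limit_us stand for the values set by --max-tries
     * and --latency-limit; zero means the corresponding option is not used.
     */
    static bool
    can_retry_failure(bool serialization_or_deadlock,
                      uint32_t retries_done, uint32_t max_tries,
                      int64_t elapsed_us, int64_t latency_limit_us)
    {
        /* only serialization and deadlock failures are ever retried */
        if (!serialization_or_deadlock)
            return false;

        /* at least one limiting option must be used to retry at all */
        if (max_tries == 0 && latency_limit_us == 0)
            return false;

        /* the next try must not exceed the maximum number of tries */
        if (max_tries && retries_done + 1 >= max_tries)
            return false;

        /* the transaction must not have used up its latency budget */
        if (latency_limit_us && elapsed_us >= latency_limit_us)
            return false;

        return true;
    }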

If there are retries and/or errors, their statistics are printed in the
progress reports, in the transaction / aggregation logs, and at the end with
the other results (both overall and per script). A transaction error is
reported here only if the last try of that transaction fails. Retries and/or
errors are also printed per command together with average latencies if the
appropriate benchmarking option is used (--report-per-command, -r) and the
total number of retries and/or errors is not zero.

If a failed transaction block does not terminate in the current script, the
commands of the following scripts are processed as usual, so you can get many
errors of the type "in failed SQL transaction" (when the current SQL
transaction is aborted and commands are ignored until the end of the
transaction block). In such cases separate statistics of these errors are
available in all reports.
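
The error types themselves are recognized from the SQLSTATE reported by the
backend, roughly as in the following sketch (classify_sqlstate is an
illustrative name; the patch's own function is getFailureStatus() in
pgbench.c, and NO_FAILURE handling is omitted here):

    #include <string.h>

    typedef enum
    {
        ANOTHER_FAILURE,           /* anything not listed below; never retried */
        SERIALIZATION_FAILURE,     /* SQLSTATE 40001; can be retried */
        DEADLOCK_FAILURE,          /* SQLSTATE 40P01; can be retried */
        IN_FAILED_SQL_TRANSACTION  /* SQLSTATE 25P02; reported separately */
    } FailureStatus;

    static FailureStatus
    classify_sqlstate(const char *sqlstate)
    {
        if (sqlstate == NULL)
            return ANOTHER_FAILURE;
        if (strcmp(sqlstate, "40001") == 0)
            return SERIALIZATION_FAILURE;
        if (strcmp(sqlstate, "40P01") == 0)
            return DEADLOCK_FAILURE;
        if (strcmp(sqlstate, "25P02") == 0)
            return IN_FAILED_SQL_TRANSACTION;
        return ANOTHER_FAILURE;
    }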

If you want to distinguish failures or errors by type (including which limit
for retries was violated and how far it was exceeded for serialization/deadlock
errors), use the pgbench debugging output created with the option --debug and
the debugging level "fails" or "all". The first variant is recommended for this
purpose because in the second case the debugging output can be very large.
---
 doc/src/sgml/ref/pgbench.sgml                      |  321 +++++-
 src/bin/pgbench/pgbench.c                          | 1109 ++++++++++++++++----
 src/bin/pgbench/t/001_pgbench_with_server.pl       |   44 +-
 src/bin/pgbench/t/002_pgbench_no_server.pl         |    7 +-
 .../t/003_serialization_and_deadlock_fails.pl      |  761 ++++++++++++++
 5 files changed, 2025 insertions(+), 217 deletions(-)
 create mode 100644 src/bin/pgbench/t/003_serialization_and_deadlock_fails.pl

diff --git a/doc/src/sgml/ref/pgbench.sgml b/doc/src/sgml/ref/pgbench.sgml
index e4b37dd..f894390 100644
--- a/doc/src/sgml/ref/pgbench.sgml
+++ b/doc/src/sgml/ref/pgbench.sgml
@@ -55,16 +55,19 @@ number of clients: 10
 number of threads: 1
 number of transactions per client: 1000
 number of transactions actually processed: 10000/10000
+maximum number of tries: 1
 tps = 85.184871 (including connections establishing)
 tps = 85.296346 (excluding connections establishing)
 </screen>
 
-  The first six lines report some of the most important parameter
-  settings.  The next line reports the number of transactions completed
-  and intended (the latter being just the product of number of clients
+  The first six lines and the eighth line report some of the most important
+  parameter settings.  The seventh line reports the number of transactions
+  completed and intended (the latter being just the product of number of clients
   and number of transactions per client); these will be equal unless the run
-  failed before completion.  (In <option>-T</option> mode, only the actual
-  number of transactions is printed.)
+  failed before completion or some SQL/meta command(s) failed (see
+  <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+  for more information).  (In <option>-T</option> mode, only the actual
+  number of transactions is printed.)
   The last two lines report the number of transactions per second,
   figured with and without counting the time to start database sessions.
  </para>
@@ -380,11 +383,28 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
      </varlistentry>
 
      <varlistentry>
-      <term><option>-d</option></term>
-      <term><option>--debug</option></term>
+      <term><option>-d</option> <replaceable>debug_level</replaceable></term>
+      <term><option>--debug=</option><replaceable>debug_level</replaceable></term>
       <listitem>
        <para>
-        Print debugging output.
+        Print debugging output. You can use the following debugging levels:
+          <itemizedlist>
+           <listitem>
+            <para><literal>no</literal>: no debugging output (except built-in
+            function <function>debug</function>, see <xref
+            linkend="pgbench-functions"/>).</para>
+           </listitem>
+           <listitem>
+            <para><literal>fails</literal>: print only failure messages, errors
+            and retries (see <xref linkend="errors-and-retries"
+            endterm="errors-and-retries-title"/> for more information).</para>
+           </listitem>
+           <listitem>
+            <para><literal>all</literal>: print all debugging output
+            (throttling, executed/sent/received commands etc.).</para>
+           </listitem>
+          </itemizedlist>
+        The default is no debugging output.
        </para>
       </listitem>
      </varlistentry>
@@ -453,6 +473,16 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
         at all. They are counted and reported separately as
         <firstterm>skipped</firstterm>.
        </para>
+       <para>
+        A transaction with a serialization or deadlock failure can be retried
+        if the total time of all its tries is less than
+        <replaceable>limit</replaceable> ms. This option can be combined with
+        <option>--max-tries</option>, which limits the total number of
+        transaction tries. If neither option is used, failed transactions are
+        not retried at all. See
+        <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+        for more information about retrying failed transactions.
+       </para>
        </listitem>
      </varlistentry>
 
@@ -513,22 +543,38 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
         Show progress report every <replaceable>sec</replaceable> seconds.  The report
         includes the time since the beginning of the run, the tps since the
         last report, and the transaction latency average and standard
-        deviation since the last report.  Under throttling (<option>-R</option>),
-        the latency is computed with respect to the transaction scheduled
-        start time, not the actual transaction beginning time, thus it also
-        includes the average schedule lag time.
+        deviation since the last report.  If any transactions ended with a
+        failed SQL or meta command since the last report, they are also reported
+        as failed.  If any transactions ended with an error "in failed SQL
+        transaction block", they are reported separately as <literal>in failed
+        tx</literal> (see <xref linkend="errors-and-retries"
+        endterm="errors-and-retries-title"/> for more information).  Under
+        throttling (<option>-R</option>), the latency is computed with respect
+        to the transaction scheduled start time, not the actual transaction
+        beginning time, thus it also includes the average schedule lag time.  If
+        any transactions have been rolled back and retried after a
+        serialization/deadlock failure since the last report, the report
+        includes the number of such transactions and the sum of all retries. Use
+        the options <option>--max-tries</option> and/or
+        <option>--latency-limit</option> to enable transaction retries after
+        serialization/deadlock failures.
        </para>
       </listitem>
      </varlistentry>
 
      <varlistentry>
       <term><option>-r</option></term>
-      <term><option>--report-latencies</option></term>
+      <term><option>--report-per-command</option></term>
       <listitem>
        <para>
-        Report the average per-statement latency (execution time from the
-        perspective of the client) of each command after the benchmark
-        finishes.  See below for details.
+        Report the following statistics for each command after the benchmark
+        finishes: the average per-statement latency (execution time from the
+        perspective of the client), the total number of errors, the number of
+        errors "in failed SQL transaction block", and the number of retries
+        after serialization or deadlock failures.  The error and retry columns
+        are displayed only if the current <application>pgbench</application>
+        run has at least one error of the corresponding type or at least one
+        retry, respectively. See below for details.
        </para>
       </listitem>
      </varlistentry>
@@ -667,6 +713,21 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
      </varlistentry>
 
      <varlistentry>
+      <term><option>--max-tries=<replaceable>number_of_tries</replaceable></option></term>
+      <listitem>
+       <para>
+        Set the maximum number of tries for transactions with
+        serialization/deadlock failures. This option can be combined with
+        <option>--latency-limit</option>, which limits the total time of
+        transaction tries. If neither option is used, failed transactions are
+        not retried at all. See
+        <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+        for more information about retrying failed transactions.
+       </para>
+      </listitem>
+     </varlistentry>
+
+     <varlistentry>
       <term><option>--progress-timestamp</option></term>
       <listitem>
        <para>
@@ -807,8 +868,8 @@ pgbench <optional> <replaceable>options</replaceable> </optional> <replaceable>d
  <refsect1>
   <title>Notes</title>
 
- <refsect2>
-  <title>What is the <quote>Transaction</quote> Actually Performed in <application>pgbench</application>?</title>
+ <refsect2 id="transactions-and-scripts">
+  <title id="transactions-and-scripts-title">What is the <quote>Transaction</quote> Actually Performed in <application>pgbench</application>?</title>
 
   <para>
    <application>pgbench</application> executes test scripts chosen randomly
@@ -1583,7 +1644,7 @@ END;
    The format of the log is:
 
 <synopsis>
-<replaceable>client_id</replaceable> <replaceable>transaction_no</replaceable> <replaceable>time</replaceable> <replaceable>script_no</replaceable> <replaceable>time_epoch</replaceable> <replaceable>time_us</replaceable> <optional> <replaceable>schedule_lag</replaceable> </optional>
+<replaceable>client_id</replaceable> <replaceable>transaction_no</replaceable> <replaceable>time</replaceable> <replaceable>script_no</replaceable> <replaceable>time_epoch</replaceable> <replaceable>time_us</replaceable> <optional> <replaceable>schedule_lag</replaceable> </optional> <optional> <replaceable>retries</replaceable> </optional>
 </synopsis>
 
    where
@@ -1604,6 +1665,17 @@ END;
    When both <option>--rate</option> and <option>--latency-limit</option> are used,
    the <replaceable>time</replaceable> for a skipped transaction will be reported as
    <literal>skipped</literal>.
+   <replaceable>retries</replaceable> is the sum of all retries after
+   serialization or deadlock failures during the current script execution. It is
+   only present when the maximum number of tries for transactions is more than 1
+   (<option>--max-tries</option>) and/or the maximum time of tries for
+   transactions is used (<option>--latency-limit</option>). If the transaction
+   ended with an error "in failed SQL transaction", its
+   <replaceable>time</replaceable> will be reported as
+   <literal>in_failed_tx</literal>. If the transaction ended with another error,
+   its <replaceable>time</replaceable> will be reported as
+   <literal>failed</literal> (see <xref linkend="errors-and-retries"
+   endterm="errors-and-retries-title"/> for more information).
   </para>
 
   <para>
@@ -1633,6 +1705,24 @@ END;
   </para>
 
   <para>
+   The following example shows a snippet of a log file with errors and retries,
+   with the maximum number of tries set to 10 (note the additional
+   <replaceable>retries</replaceable> column):
+<screen>
+3 0 47423 0 1499414498 34501 4
+3 1 8333 0 1499414498 42848 1
+3 2 8358 0 1499414498 51219 1
+4 0 72345 0 1499414498 59433 7
+1 3 41718 0 1499414498 67879 5
+1 4 8416 0 1499414498 76311 1
+3 3 33235 0 1499414498 84469 4
+0 0 failed 0 1499414498 84905 10
+2 0 failed 0 1499414498 86248 10
+3 4 8307 0 1499414498 92788 1
+</screen>
+  </para>
+
+  <para>
    When running a long test on hardware that can handle a lot of transactions,
    the log files can become very large.  The <option>--sampling-rate</option> option
    can be used to log only a random sample of transactions.
@@ -1647,7 +1737,7 @@ END;
    format is used for the log files:
 
 <synopsis>
-<replaceable>interval_start</replaceable> <replaceable>num_transactions</replaceable> <replaceable>sum_latency</replaceable> <replaceable>sum_latency_2</replaceable> <replaceable>min_latency</replaceable> <replaceable>max_latency</replaceable> <optional> <replaceable>sum_lag</replaceable> <replaceable>sum_lag_2</replaceable> <replaceable>min_lag</replaceable> <replaceable>max_lag</replaceable> <optional> <replaceable>skipped</replaceable> </optional> </optional>
+<replaceable>interval_start</replaceable> <replaceable>num_transactions</replaceable> <replaceable>sum_latency</replaceable> <replaceable>sum_latency_2</replaceable> <replaceable>min_latency</replaceable> <replaceable>max_latency</replaceable> <replaceable>failed_tx</replaceable> <replaceable>in_failed_tx</replaceable> <optional> <replaceable>sum_lag</replaceable> <replaceable>sum_lag_2</replaceable> <replaceable>min_lag</replaceable> <replaceable>max_lag</replaceable> <optional> <replaceable>skipped</replaceable> </optional> </optional> <optional> <replaceable>retried_tx</replaceable> <replaceable>retries</replaceable> </optional>
 </synopsis>
 
    where
@@ -1661,7 +1751,13 @@ END;
    transaction latencies within the interval,
    <replaceable>min_latency</replaceable> is the minimum latency within the interval,
    and
-   <replaceable>max_latency</replaceable> is the maximum latency within the interval.
+   <replaceable>max_latency</replaceable> is the maximum latency within the interval,
+   <replaceable>failed_tx</replaceable> is the number of transactions that ended
+   with a failed SQL or meta command within the interval,
+   <replaceable>in_failed_tx</replaceable> is the number of transactions that
+   ended with an error "in failed SQL transaction block" (see
+   <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+   for more information).
    The next fields,
    <replaceable>sum_lag</replaceable>, <replaceable>sum_lag_2</replaceable>, <replaceable>min_lag</replaceable>,
    and <replaceable>max_lag</replaceable>, are only present if the <option>--rate</option>
@@ -1669,21 +1765,28 @@ END;
    They provide statistics about the time each transaction had to wait for the
    previous one to finish, i.e. the difference between each transaction's
    scheduled start time and the time it actually started.
-   The very last field, <replaceable>skipped</replaceable>,
+   The next field, <replaceable>skipped</replaceable>,
    is only present if the <option>--latency-limit</option> option is used, too.
    It counts the number of transactions skipped because they would have
    started too late.
+   The <replaceable>retried_tx</replaceable> and
+   <replaceable>retries</replaceable> fields are only present if the maximum
+   number of tries for transactions is more than 1
+   (<option>--max-tries</option>) and/or the maximum time of tries for
+   transactions is used (<option>--latency-limit</option>). They report the
+   number of retried transactions and the sum of all the retries after
+   serialization or deadlock failures within the interval.
    Each transaction is counted in the interval when it was committed.
   </para>
 
   <para>
    Here is some example output:
 <screen>
-1345828501 5601 1542744 483552416 61 2573
-1345828503 7884 1979812 565806736 60 1479
-1345828505 7208 1979422 567277552 59 1391
-1345828507 7685 1980268 569784714 60 1398
-1345828509 7073 1979779 573489941 236 1411
+1345828501 5601 1542744 483552416 61 2573 0 0
+1345828503 7884 1979812 565806736 60 1479 0 0
+1345828505 7208 1979422 567277552 59 1391 0 0
+1345828507 7685 1980268 569784714 60 1398 0 0
+1345828509 7073 1979779 573489941 236 1411 0 0
 </screen></para>
 
   <para>
@@ -1695,15 +1798,54 @@ END;
  </refsect2>
 
  <refsect2>
-  <title>Per-Statement Latencies</title>
+  <title>Per-Statement Report</title>
+
+  <para>
+   With the <option>-r</option> option, <application>pgbench</application>
+   collects the following statistics for each statement:
+   <itemizedlist>
+     <listitem>
+       <para>
+         <literal>latency</literal> &mdash; elapsed transaction time for each
+         statement. <application>pgbench</application> reports an average value
+         of all successful runs of the statement.
+       </para>
+     </listitem>
+     <listitem>
+       <para>
+         The number of errors in this statement. See
+         <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+         for more information.
+       </para>
+     </listitem>
+     <listitem>
+       <para>
+         The number of errors "in failed SQL transaction" in this statement. See
+         <xref linkend="errors-and-retries" endterm="errors-and-retries-title"/>
+         for more information.
+       </para>
+     </listitem>
+     <listitem>
+       <para>
+         The number of retries after a serialization or a deadlock failure in
+         this statement. See <xref linkend="errors-and-retries"
+         endterm="errors-and-retries-title"/> for more information.
+       </para>
+     </listitem>
+   </itemizedlist>
+  </para>
 
   <para>
-   With the <option>-r</option> option, <application>pgbench</application> collects
-   the elapsed transaction time of each statement executed by every
-   client.  It then reports an average of those values, referred to
-   as the latency for each statement, after the benchmark has finished.
+   The error and retry columns are displayed only if the current
+   <application>pgbench</application> run has at least one error or at least
+   one retry, respectively.
   </para>
 
+  <para>
+   All values are computed for each statement executed by every client and are
+   reported after the benchmark has finished.
+  </para>
+
   <para>
    For the default script, the output will look similar to this:
 <screen>
@@ -1715,6 +1857,7 @@ number of clients: 10
 number of threads: 1
 number of transactions per client: 1000
 number of transactions actually processed: 10000/10000
+maximum number of tries: 1
 latency average = 15.844 ms
 latency stddev = 2.715 ms
 tps = 618.764555 (including connections establishing)
@@ -1732,10 +1875,50 @@ statement latencies in milliseconds:
         0.371  INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);
         1.212  END;
 </screen>
+
+   Another example of output for the default script with the default
+   transaction isolation level set to serializable (<command>PGOPTIONS='-c
+   default_transaction_isolation=serializable' pgbench ...</command>):
+<screen>
+starting vacuum...end.
+transaction type: &lt;builtin: TPC-B (sort of)&gt;
+scaling factor: 1
+query mode: simple
+number of clients: 10
+number of threads: 1
+number of transactions per client: 1000
+number of transactions actually processed: 4473/10000
+number of errors: 5527 (55.270%)
+number of retried: 7467 (74.670%)
+number of retries: 257244
+maximum number of tries: 100
+number of transactions above the 100.0 ms latency limit: 5766/10000 (57.660 %) (including errors)
+latency average = 41.169 ms
+latency stddev = 51.783 ms
+tps = 50.322494 (including connections establishing)
+tps = 50.324595 (excluding connections establishing)
+statement latencies in milliseconds, errors and retries:
+  0.004     0       0  \set aid random(1, 100000 * :scale)
+  0.000     0       0  \set bid random(1, 1 * :scale)
+  0.000     0       0  \set tid random(1, 10 * :scale)
+  0.000     0       0  \set delta random(-5000, 5000)
+  0.213     0       0  BEGIN;
+  0.393     0       0  UPDATE pgbench_accounts
+                       SET abalance = abalance + :delta WHERE aid = :aid;
+  0.332     0       0  SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
+  0.409  4971  250265  UPDATE pgbench_tellers
+                       SET tbalance = tbalance + :delta WHERE tid = :tid;
+  0.311   556    6975  UPDATE pgbench_branches
+                       SET bbalance = bbalance + :delta WHERE bid = :bid;
+  0.299     0       0  INSERT INTO pgbench_history
+                              (tid, bid, aid, delta, mtime)
+                       VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);
+  0.520     0       4  END;
+</screen>
   </para>
 
   <para>
-   If multiple script files are specified, the averages are reported
+   If multiple script files are specified, all statistics are reported
    separately for each script file.
   </para>
 
@@ -1749,6 +1932,78 @@ statement latencies in milliseconds:
   </para>
  </refsect2>
 
+ <refsect2 id="errors-and-retries">
+  <title id="errors-and-retries-title">Errors and Serialization/Deadlock Retries</title>
+
+  <para>
+   A client's run is aborted only in case of a serious error, for example, if
+   the connection with the backend is lost. Otherwise, if the execution of an
+   SQL or meta command fails, the client's run continues normally until the end
+   of the current script execution (it is assumed that one transaction script
+   contains only one transaction; see <xref linkend="transactions-and-scripts"
+   endterm="transactions-and-scripts-title"/> for more information).
+   Transactions with serialization or deadlock failures are rolled back and
+   repeated until they complete successfully or reach the maximum number of
+   tries (specified by the <option>--max-tries</option> option) or the maximum
+   time of tries (specified by the <option>--latency-limit</option> option). If
+   the last run of the transaction fails, the transaction will be reported as
+   failed, and the client variables will be set as they were before the first
+   run of the transaction.
+  </para>
+
+  <note>
+   <para>
+    Be careful when repeating scripts that contain multiple transactions: the
+    script is always retried completely, so the successful transactions can be
+    performed several times.
+   </para>
+   <para>
+    Be careful when repeating transactions with shell commands. Unlike the
+    results of SQL commands, the results of shell commands are not rolled back,
+    except for the variable value of the <command>\setshell</command> command.
+   </para>
+   <para>
+    If a failed transaction block does not terminate in the current script, the
+    commands of the following scripts are processed as usual, so you can get
+    many errors of the type "in failed SQL transaction" (when the current SQL
+    transaction is aborted and commands are ignored until the end of the
+    transaction block). In such cases separate statistics of these errors are
+    available in all reports.
+   </para>
+  </note>
+
+  <para>
+   The latency of a successful transaction includes the entire transaction
+   execution time, including rollbacks and retries. The latency for failed
+   transactions and commands is not computed separately.
+  </para>
+
+  <para>
+   The main report contains the number of failed transactions if it is non-zero.
+   If the total number of transactions that ended with an error "in failed SQL
+   transaction block" is non-zero, the main report also contains it. If the
+   total number of retried transactions is non-zero, the main report also
+   contains the statistics related to retries: the total number of retried
+   transactions and the total number of retries (use the options
+   <option>--max-tries</option> and/or <option>--latency-limit</option> to
+   enable retries). The per-statement report inherits all columns from the main
+   report. Note that if a failure/error occurs, the following failures/errors in
+   the current script execution are not shown in the reports. Retries are only
+   reported for the first command where a failure occurred during the current
+   script execution.
+  </para>
+
+  <para>
+   If you want to distinguish failures or errors by type (including which
+   limit for retries was violated and how far it was exceeded for
+   serialization/deadlock errors), use the <application>pgbench</application>
+   debugging output created with the option <option>--debug</option> and the
+   debugging level <literal>fails</literal> or <literal>all</literal>. The
+   first variant is recommended for this purpose because in the second case
+   the debugging output can be very large.
+  </para>
+ </refsect2>
+
  <refsect2>
   <title>Good Practices</title>
 
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index d100cee..57495d6 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -59,6 +59,9 @@
 
 #include "pgbench.h"
 
+#define ERRCODE_IN_FAILED_SQL_TRANSACTION  "25P02"
+#define ERRCODE_T_R_SERIALIZATION_FAILURE  "40001"
+#define ERRCODE_T_R_DEADLOCK_DETECTED  "40P01"
 #define ERRCODE_UNDEFINED_TABLE  "42P01"
 
 /*
@@ -187,9 +190,25 @@ bool		progress_timestamp = false; /* progress report with Unix time */
 int			nclients = 1;		/* number of clients */
 int			nthreads = 1;		/* number of threads */
 bool		is_connect;			/* establish connection for each transaction */
-bool		is_latencies;		/* report per-command latencies */
+bool		report_per_command = false;	/* report per-command latencies, retries
+										 * after the failures and errors
+										 * (failures without retrying) */
 int			main_pid;			/* main process id used in log filename */
 
+/*
+ * There are different types of restrictions for deciding that the current
+ * failed transaction can no longer be retried and should be reported as
+ * failed:
+ * - max_tries can be used to limit the number of tries;
+ * - latency_limit can be used to limit the total time of tries.
+ *
+ * They can be combined, and at least one of them must be used to retry failed
+ * transactions. By default, failed transactions are not retried at all.
+ */
+uint32		max_tries = 0;		/* we cannot retry a failed transaction if its
+								 * number of tries reaches this maximum; if its
+								 * value is zero, it is not used */
+
 char	   *pghost = "";
 char	   *pgport = "";
 char	   *login = NULL;
@@ -243,9 +262,21 @@ typedef struct SimpleStats
 typedef struct StatsData
 {
 	time_t		start_time;		/* interval start time, for aggregates */
-	int64		cnt;			/* number of transactions, including skipped */
+	int64		cnt;			/* number of successful transactions, including
+								 * skipped */
 	int64		skipped;		/* number of transactions skipped under --rate
 								 * and --latency-limit */
+	int64		retries;		/* total number of retries */
+	int64		retried;		/* number of transactions that were retried
+								 * after a serialization or a deadlock
+								 * failure */
+	int64		errors;			/* number of transactions that were not retried
+								 * after a serialization or a deadlock
+								 * failure or had another error (including meta
+								 * commands errors) */
+	int64		errors_in_failed_tx;	/* number of transactions that failed
+										 * with the error
+										 * ERRCODE_IN_FAILED_SQL_TRANSACTION */
 	SimpleStats latency;
 	SimpleStats lag;
 } StatsData;
@@ -269,6 +300,36 @@ typedef struct RandomState
 } RandomState;
 
 /*
+ * Data structure for repeating a transaction from the beginning with the same
+ * parameters.
+ */
+typedef struct RetryState
+{
+	RandomState random_state;	/* random seed */
+	Variables   variables;		/* client variables */
+} RetryState;
+
+/*
+ * For the failures during script execution.
+ */
+typedef enum FailureStatus
+{
+	NO_FAILURE = 0,
+	ANOTHER_FAILURE,			/* failures that are not listed separately
+								 * below */
+	SERIALIZATION_FAILURE,
+	DEADLOCK_FAILURE,
+	IN_FAILED_SQL_TRANSACTION
+} FailureStatus;
+
+typedef struct Failure
+{
+	FailureStatus status;		/* type of the failure */
+	int			command;		/* command number in script where the failure
+								 * occurred */
+} Failure;
+
+/*
  * Connection state machine states.
  */
 typedef enum
@@ -323,6 +384,22 @@ typedef enum
 	CSTATE_END_COMMAND,
 
 	/*
+	 * States for transactions with serialization or deadlock failures.
+	 *
+	 * First, remember the failure in CSTATE_FAILURE. Then process the other
+	 * commands of the failed transaction, if any, and go to CSTATE_RETRY. If
+	 * we can re-execute the transaction from the very beginning, report this
+	 * as a failure, set the same parameters for the transaction execution as
+	 * in the previous tries and process the first transaction command in
+	 * CSTATE_START_COMMAND. Otherwise, report this as an error, set the
+	 * parameters for the transaction execution as they were before the first
+	 * run of this transaction (except for the random state) and go to
+	 * CSTATE_END_TX to complete this transaction.
+	 */
+	CSTATE_FAILURE,
+	CSTATE_RETRY,
+
+	/*
 	 * CSTATE_END_TX performs end-of-transaction processing.  Calculates
 	 * latency, and logs the transaction.  In --connect mode, closes the
 	 * current connection.  Chooses the next script to execute and starts over
@@ -364,6 +441,18 @@ typedef struct
 
 	bool		prepared[MAX_SCRIPTS];	/* whether client prepared the script */
 
+	/*
+	 * For processing errors and repeating transactions with serialization or
+	 * deadlock failures:
+	 */
+	Failure		first_failure;	/* status and command number of the first
+								 * failure in the current transaction execution;
+								 * status NO_FAILURE if there were no failures
+								 * or errors */
+	RetryState  retry_state;
+	uint32			retries;	/* how many times have we already retried the
+								 * current transaction? */
+
 	/* per client collected stats */
 	int64		cnt;			/* client transaction count, for -t */
 	int			ecnt;			/* error count */
@@ -417,7 +506,8 @@ typedef struct
 	instr_time	start_time;		/* thread start time */
 	instr_time	conn_time;
 	StatsData	stats;
-	int64		latency_late;	/* executed but late transactions */
+	int64		latency_late;	/* executed but late transactions (including
+								 * errors) */
 } TState;
 
 #define INVALID_THREAD		((pthread_t) 0)
@@ -463,6 +553,10 @@ typedef struct
 	char	   *argv[MAX_ARGS]; /* command word list */
 	PgBenchExpr *expr;			/* parsed expression, if needed */
 	SimpleStats stats;			/* time spent in this command */
+	int64		retries;
+	int64		errors;			/* number of failures that were not retried */
+	int64		errors_in_failed_tx;	/* number of errors
+										 * ERRCODE_IN_FAILED_SQL_TRANSACTION */
 } Command;
 
 typedef struct ParsedScript
@@ -478,7 +572,18 @@ static int	num_scripts;		/* number of scripts in sql_script[] */
 static int	num_commands = 0;	/* total number of Command structs */
 static int64 total_weight = 0;
 
-static int	debug = 0;			/* debug flag */
+typedef enum DebugLevel
+{
+	NO_DEBUG = 0,				/* no debugging output (except PGBENCH_DEBUG) */
+	DEBUG_FAILS,				/* print only failure messages, errors and
+								 * retries */
+	DEBUG_ALL,					/* print all debugging output (throttling,
+								 * executed/sent/received commands etc.) */
+	NUM_DEBUGLEVEL
+} DebugLevel;
+
+static DebugLevel debug_level = NO_DEBUG;	/* debug flag */
+static const char *DEBUGLEVEL[] = {"no", "fails", "all"};
 
 /* Builtin test scripts */
 typedef struct BuiltinScript
@@ -534,9 +639,22 @@ typedef enum ErrorLevel
 	ELEVEL_DEBUG,
 
 	/*
-	 * To report the error/log messages and/or PGBENCH_DEBUG.
+	 * A normal failure of an SQL/meta command, or processing of a failed
+	 * transaction (its end/retry).
+	 */
+	ELEVEL_LOG_CLIENT_FAIL,
+
+	/*
+	 * Something serious, e.g. the connection with the backend was lost;
+	 * therefore abort the client.
 	 */
-	ELEVEL_LOG,
+	ELEVEL_LOG_CLIENT_ABORTED,
+
+	/*
+	 * To report the error/log messages of the main program and/or
+	 * PGBENCH_DEBUG.
+	 */
+	ELEVEL_LOG_MAIN,
 
 	/*
 	 * To report the error messages of the main program and to exit immediately.
@@ -641,7 +759,6 @@ static int  errmsgImpl(const char *fmt,...) pg_attribute_printf(1, 2);
 static void errfinishImpl(int dummy,...);
 #endif							/* ENABLE_THREAD_SAFETY && HAVE__VA_ARGS */
 
-
 /* callback functions for our flex lexer */
 static const PsqlScanCallbacks pgbench_callbacks = {
 	NULL,						/* don't need get_variable functionality */
@@ -688,7 +805,7 @@ usage(void)
 		   "                           protocol for submitting queries (default: simple)\n"
 		   "  -n, --no-vacuum          do not run VACUUM before tests\n"
 		   "  -P, --progress=NUM       show thread progress report every NUM seconds\n"
-		   "  -r, --report-latencies   report average latency per command\n"
+		   "  -r, --report-per-command report latencies, errors and retries per command\n"
 		   "  -R, --rate=NUM           target rate in transactions per second\n"
 		   "  -s, --scale=NUM          report this scale factor in output\n"
 		   "  -t, --transactions=NUM   number of transactions each client runs (default: 10)\n"
@@ -697,11 +814,12 @@ usage(void)
 		   "  --aggregate-interval=NUM aggregate data over NUM seconds\n"
 		   "  --log-prefix=PREFIX      prefix for transaction time log file\n"
 		   "                           (default: \"pgbench_log\")\n"
+		   "  --max-tries=NUM          max number of tries to run transaction\n"
 		   "  --progress-timestamp     use Unix epoch timestamps for progress\n"
 		   "  --random-seed=SEED       set random seed (\"time\", \"rand\", integer)\n"
 		   "  --sampling-rate=NUM      fraction of transactions to log (e.g., 0.01 for 1%%)\n"
 		   "\nCommon options:\n"
-		   "  -d, --debug              print debugging output\n"
+		   "  -d, --debug=no|fails|all print debugging output (default: no)\n"
 		   "  -h, --host=HOSTNAME      database server host or socket directory\n"
 		   "  -p, --port=PORT          database server port number\n"
 		   "  -U, --username=USERNAME  connect as specified database user\n"
@@ -784,7 +902,7 @@ strtoint64(const char *str)
 	/* require at least one digit */
 	if (!isdigit((unsigned char) *ptr))
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_MAIN,
 				(errmsg("invalid input syntax for integer: \"%s\"\n", str)));
 	}
 
@@ -795,7 +913,7 @@ strtoint64(const char *str)
 
 		if ((tmp / 10) != result)	/* overflow? */
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg("value \"%s\" is out of range for type bigint\n",
 							str)));
 		}
@@ -810,7 +928,7 @@ gotdigits:
 
 	if (*ptr != '\0')
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_MAIN,
 				(errmsg("invalid input syntax for integer: \"%s\"\n", str)));
 	}
 
@@ -1164,6 +1282,10 @@ initStats(StatsData *sd, time_t start_time)
 	sd->start_time = start_time;
 	sd->cnt = 0;
 	sd->skipped = 0;
+	sd->retries = 0;
+	sd->retried = 0;
+	sd->errors = 0;
+	sd->errors_in_failed_tx = 0;
 	initSimpleStats(&sd->latency);
 	initSimpleStats(&sd->lag);
 }
@@ -1172,8 +1294,30 @@ initStats(StatsData *sd, time_t start_time)
  * Accumulate one additional item into the given stats object.
  */
 static void
-accumStats(StatsData *stats, bool skipped, double lat, double lag)
+accumStats(StatsData *stats, bool skipped, double lat, double lag,
+		   FailureStatus first_error, int64 retries)
 {
+	/*
+	 * Record the number of retries regardless of whether the transaction was
+	 * successful or failed.
+	 */
+	stats->retries += retries;
+	if (retries > 0)
+		stats->retried++;
+
+	/* Record the failed transaction */
+	if (first_error != NO_FAILURE)
+	{
+		stats->errors++;
+
+		if (first_error == IN_FAILED_SQL_TRANSACTION)
+			stats->errors_in_failed_tx++;
+
+		return;
+	}
+
+	/* Record the successful transaction */
+
 	stats->cnt++;
 
 	if (skipped)
@@ -1212,7 +1356,7 @@ tryExecuteStatement(PGconn *con, const char *sql)
 	res = PQexec(con, sql);
 	if (PQresultStatus(res) != PGRES_COMMAND_OK)
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_MAIN,
 				(errmsg("%s(ignoring this error and continuing anyway)\n",
 						PQerrorMessage(con))));
 	}
@@ -1260,7 +1404,7 @@ doConnect(void)
 
 		if (!conn)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg("connection to database \"%s\" failed\n", dbName)));
 			return NULL;
 		}
@@ -1279,7 +1423,7 @@ doConnect(void)
 	/* check to see that the backend connection was successfully made */
 	if (PQstatus(conn) == CONNECTION_BAD)
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_MAIN,
 				(errmsg("connection to database \"%s\" failed:\n%s",
 						dbName, PQerrorMessage(conn))));
 		PQfinish(conn);
@@ -1419,7 +1563,7 @@ makeVariableValue(Variable *var)
 
 		if (sscanf(var->svalue, "%lf%c", &dv, &xs) != 1)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_CLIENT_FAIL,
 					(errmsg("malformed variable \"%s\" value: \"%s\"\n",
 							var->name, var->svalue)));
 			return false;
@@ -1490,7 +1634,7 @@ lookupCreateVariable(Variables *variables, const char *context, char *name,
 			 * About the error level used: if we process client commands, it a
 			 * normal failure; otherwise it is not and we exit the program.
 			 */
-			ereport(client ? ELEVEL_LOG : ELEVEL_FATAL,
+			ereport(client ? ELEVEL_LOG_CLIENT_FAIL : ELEVEL_FATAL,
 					(errmsg("%s: invalid variable name: \"%s\"\n",
 							context, name)));
 			return NULL;
@@ -1706,7 +1850,7 @@ coerceToBool(PgBenchValue *pval, bool *bval)
 	}
 	else						/* NULL, INT or DOUBLE */
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_CLIENT_FAIL,
 				(errmsg("cannot coerce %s to boolean\n", valueTypeName(pval))));
 		*bval = false;			/* suppress uninitialized-variable warnings */
 		return false;
@@ -1752,7 +1896,7 @@ coerceToInt(PgBenchValue *pval, int64 *ival)
 
 		if (dval < PG_INT64_MIN || PG_INT64_MAX < dval)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_CLIENT_FAIL,
 					(errmsg("double to int overflow for %f\n", dval)));
 			return false;
 		}
@@ -1761,7 +1905,7 @@ coerceToInt(PgBenchValue *pval, int64 *ival)
 	}
 	else						/* BOOLEAN or NULL */
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_CLIENT_FAIL,
 				(errmsg("cannot coerce %s to int\n", valueTypeName(pval))));
 		return false;
 	}
@@ -1783,7 +1927,7 @@ coerceToDouble(PgBenchValue *pval, double *dval)
 	}
 	else						/* BOOLEAN or NULL */
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_CLIENT_FAIL,
 				(errmsg("cannot coerce %s to double\n", valueTypeName(pval))));
 		return false;
 	}
@@ -1965,7 +2109,7 @@ evalStandardFunc(TState *thread, CState *st,
 
 	if (l != NULL)
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_CLIENT_FAIL,
 				(errmsg("too many function arguments, maximum is %d\n",
 					   MAX_FARGS)));
 		return false;
@@ -2090,7 +2234,7 @@ evalStandardFunc(TState *thread, CState *st,
 						case PGBENCH_MOD:
 							if (ri == 0)
 							{
-								ereport(ELEVEL_LOG,
+								ereport(ELEVEL_LOG_CLIENT_FAIL,
 										(errmsg("division by zero\n")));
 								return false;
 							}
@@ -2103,7 +2247,7 @@ evalStandardFunc(TState *thread, CState *st,
 									if (li == PG_INT64_MIN)
 									{
 										ereport(
-											ELEVEL_LOG,
+											ELEVEL_LOG_CLIENT_FAIL,
 											(errmsg("bigint out of range\n")));
 										return false;
 									}
@@ -2239,7 +2383,7 @@ evalStandardFunc(TState *thread, CState *st,
 					Assert(0);
 				}
 
-				ereport(ELEVEL_LOG, (errmsg("%s", errormsg_buf.data)));
+				ereport(ELEVEL_LOG_MAIN, (errmsg("%s", errormsg_buf.data)));
 				termPQExpBuffer(&errormsg_buf);
 
 				*retval = *varg;
@@ -2364,14 +2508,14 @@ evalStandardFunc(TState *thread, CState *st,
 				/* check random range */
 				if (imin > imax)
 				{
-					ereport(ELEVEL_LOG,
+					ereport(ELEVEL_LOG_CLIENT_FAIL,
 							(errmsg("empty range given to random\n")));
 					return false;
 				}
 				else if (imax - imin < 0 || (imax - imin) + 1 < 0)
 				{
 					/* prevent int overflows in random functions */
-					ereport(ELEVEL_LOG,
+					ereport(ELEVEL_LOG_CLIENT_FAIL,
 							(errmsg("random range is too large\n")));
 					return false;
 				}
@@ -2394,7 +2538,7 @@ evalStandardFunc(TState *thread, CState *st,
 					{
 						if (param < MIN_GAUSSIAN_PARAM)
 						{
-							ereport(ELEVEL_LOG,
+							ereport(ELEVEL_LOG_CLIENT_FAIL,
 									(errmsg("gaussian parameter must be at least %f (not %f)\n",
 											MIN_GAUSSIAN_PARAM, param)));
 							return false;
@@ -2408,7 +2552,7 @@ evalStandardFunc(TState *thread, CState *st,
 					{
 						if (param <= 0.0 || param == 1.0 || param > MAX_ZIPFIAN_PARAM)
 						{
-							ereport(ELEVEL_LOG,
+							ereport(ELEVEL_LOG_CLIENT_FAIL,
 									(errmsg("zipfian parameter must be in range (0, 1) U (1, %d] (got %f)\n",
 											MAX_ZIPFIAN_PARAM, param)));
 							return false;
@@ -2421,7 +2565,7 @@ evalStandardFunc(TState *thread, CState *st,
 					{
 						if (param <= 0.0)
 						{
-							ereport(ELEVEL_LOG,
+							ereport(ELEVEL_LOG_CLIENT_FAIL,
 									(errmsg("exponential parameter must be greater than zero (got %f)\n",
 											param)));
 							return false;
@@ -2534,7 +2678,7 @@ evaluateExpr(TState *thread, CState *st, PgBenchExpr *expr, PgBenchValue *retval
 
 				if ((var = lookupVariable(&st->variables, expr->u.variable.varname)) == NULL)
 				{
-					ereport(ELEVEL_LOG,
+					ereport(ELEVEL_LOG_CLIENT_FAIL,
 							(errmsg("undefined variable \"%s\"\n",
 									expr->u.variable.varname)));
 					return false;
@@ -2630,7 +2774,7 @@ runShellCommand(Variables *variables, char *variable, char **argv, int argc)
 		}
 		else if ((arg = getVariable(variables, argv[i] + 1)) == NULL)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_CLIENT_FAIL,
 					(errmsg("%s: undefined variable \"%s\"\n",
 							argv[0], argv[i])));
 			return false;
@@ -2639,7 +2783,7 @@ runShellCommand(Variables *variables, char *variable, char **argv, int argc)
 		arglen = strlen(arg);
 		if (len + arglen + (i > 0 ? 1 : 0) >= SHELL_COMMAND_SIZE - 1)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_CLIENT_FAIL,
 					(errmsg("%s: shell command is too long\n", argv[0])));
 			return false;
 		}
@@ -2659,7 +2803,7 @@ runShellCommand(Variables *variables, char *variable, char **argv, int argc)
 		{
 			if (!timer_exceeded)
 			{
-				ereport(ELEVEL_LOG,
+				ereport(ELEVEL_LOG_CLIENT_FAIL,
 						(errmsg("%s: could not launch shell command\n",
 								argv[0])));
 			}
@@ -2671,7 +2815,7 @@ runShellCommand(Variables *variables, char *variable, char **argv, int argc)
 	/* Execute the command with pipe and read the standard output. */
 	if ((fp = popen(command, "r")) == NULL)
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_CLIENT_FAIL,
 				(errmsg("%s: could not launch shell command\n", argv[0])));
 		return false;
 	}
@@ -2679,7 +2823,7 @@ runShellCommand(Variables *variables, char *variable, char **argv, int argc)
 	{
 		if (!timer_exceeded)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_CLIENT_FAIL,
 					(errmsg("%s: could not read result of shell command\n",
 							argv[0])));
 		}
@@ -2688,7 +2832,7 @@ runShellCommand(Variables *variables, char *variable, char **argv, int argc)
 	}
 	if (pclose(fp) < 0)
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_CLIENT_FAIL,
 				(errmsg("%s: could not close shell command\n", argv[0])));
 		return false;
 	}
@@ -2699,7 +2843,7 @@ runShellCommand(Variables *variables, char *variable, char **argv, int argc)
 		endptr++;
 	if (*res == '\0' || *endptr != '\0')
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_CLIENT_FAIL,
 				(errmsg("%s: shell command must return an integer (not \"%s\")\n",
 						argv[0], res)));
 		return false;
@@ -2721,11 +2865,50 @@ preparedStatementName(char *buffer, int file, int state)
 }
 
 static void
-commandFailed(CState *st, const char *cmd, const char *message)
+commandFailed(CState *st, const char *cmd, const char *message,
+			  ErrorLevel elevel)
 {
-	ereport(ELEVEL_LOG,
-			(errmsg("client %d aborted in command %d (%s) of script %d; %s\n",
-					st->id, st->command, cmd, st->use_file, message)));
+	switch (elevel)
+	{
+		case ELEVEL_LOG_CLIENT_FAIL:
+			if (st->first_failure.status == NO_FAILURE)
+			{
+				/*
+				 * This is the first failure during the execution of the current
+				 * script.
+				 */
+				ereport(ELEVEL_LOG_CLIENT_FAIL,
+						(errmsg("client %d got a failure in command %d (%s) of script %d; %s\n",
+								st->id, st->command, cmd, st->use_file,
+								message)));
+			}
+			else
+			{
+				/*
+				 * This is not the first failure during the execution of the
+				 * current script.
+				 */
+				ereport(ELEVEL_LOG_CLIENT_FAIL,
+						(errmsg("client %d continues a failed transaction in command %d (%s) of script %d; %s\n",
+								st->id, st->command, cmd, st->use_file,
+								message)));
+			}
+			break;
+		case ELEVEL_LOG_CLIENT_ABORTED:
+			ereport(ELEVEL_LOG_CLIENT_ABORTED,
+					(errmsg("client %d aborted in command %d (%s) of script %d; %s\n",
+							st->id, st->command, cmd, st->use_file, message)));
+			break;
+		case ELEVEL_DEBUG:
+		case ELEVEL_LOG_MAIN:
+		case ELEVEL_FATAL:
+		default:
+			/* internal error which should never occur */
+			ereport(ELEVEL_FATAL,
+					(errmsg("unexpected error level when the command failed: %d\n",
+							elevel)));
+			break;
+	}
 }
 
 /* return a script number with a weighted choice. */
@@ -2797,7 +2980,7 @@ sendCommand(CState *st, Command *command)
 								commands[j]->argv[0], commands[j]->argc - 1, NULL);
 				if (PQresultStatus(res) != PGRES_COMMAND_OK)
 				{
-					ereport(ELEVEL_LOG,
+					ereport(ELEVEL_LOG_MAIN,
 							(errmsg("%s", PQerrorMessage(st->con))));
 				}
 				PQclear(res);
@@ -2820,7 +3003,6 @@ sendCommand(CState *st, Command *command)
 		ereport(ELEVEL_DEBUG,
 				(errmsg("client %d could not send %s\n",
 						st->id, command->argv[0])));
-		st->ecnt++;
 		return false;
 	}
 	else
@@ -2841,7 +3023,7 @@ evaluateSleep(Variables *variables, int argc, char **argv, int *usecs)
 	{
 		if ((var = getVariable(variables, argv[1] + 1)) == NULL)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_CLIENT_FAIL,
 					(errmsg("%s: undefined variable \"%s\"\n",
 							argv[0], argv[1])));
 			return false;
@@ -2866,6 +3048,186 @@ evaluateSleep(Variables *variables, int argc, char **argv, int *usecs)
 }
 
 /*
+ * Get the number of all processed transactions including skipped ones and
+ * errors.
+ */
+static int64
+getTotalCnt(const CState *st)
+{
+	return st->cnt + st->ecnt;
+}
+
+/*
+ * Copy a random state.
+ */
+static void
+copyRandomState(RandomState *destination, const RandomState *source)
+{
+	memcpy(destination->data, source->data, sizeof(unsigned short) * 3);
+}
+
+/*
+ * Make a deep copy of the variables array.
+ */
+static void
+copyVariables(Variables *destination_vars, const Variables *source_vars)
+{
+	Variable   *destination;
+	Variable   *current_destination;
+	const Variable *source;
+	const Variable *current_source;
+	int			nvariables;
+
+	if (!destination_vars || !source_vars)
+		return;
+
+	destination = destination_vars->array;
+	source = source_vars->array;
+	nvariables = source_vars->nvariables;
+
+	for (current_destination = destination;
+		 current_destination - destination < destination_vars->nvariables;
+		 ++current_destination)
+	{
+		pg_free(current_destination->name);
+		pg_free(current_destination->svalue);
+	}
+
+	destination_vars->array = pg_realloc(destination_vars->array,
+										 sizeof(Variable) * nvariables);
+	destination = destination_vars->array;
+
+	for (current_source = source, current_destination = destination;
+		 current_source - source < nvariables;
+		 ++current_source, ++current_destination)
+	{
+		current_destination->name = pg_strdup(current_source->name);
+		if (current_source->svalue)
+			current_destination->svalue = pg_strdup(current_source->svalue);
+		else
+			current_destination->svalue = NULL;
+		current_destination->value = current_source->value;
+	}
+
+	destination_vars->nvariables = nvariables;
+	destination_vars->vars_sorted = source_vars->vars_sorted;
+}
+
+/*
+ * Returns true if this type of failure can be retried.
+ */
+static bool
+canRetryFailure(FailureStatus failure_status)
+{
+	return (failure_status == SERIALIZATION_FAILURE ||
+			failure_status == DEADLOCK_FAILURE);
+}
+
+/*
+ * Returns true if the failure can be retried.
+ */
+static bool
+canRetry(CState *st, instr_time *now)
+{
+	FailureStatus failure_status = st->first_failure.status;
+
+	Assert(failure_status != NO_FAILURE);
+
+	/* We can only retry serialization or deadlock failures. */
+	if (!canRetryFailure(failure_status))
+		return false;
+
+	/*
+	 * We must have at least one option to limit the retrying of failed
+	 * transactions.
+	 */
+	Assert(max_tries || latency_limit);
+
+	/*
+	 * We cannot retry the failure if we have reached the maximum number of
+	 * tries.
+	 */
+	if (max_tries && st->retries + 1 >= max_tries)
+		return false;
+
+	/*
+	 * We cannot retry the failure if we spent too much time on this
+	 * transaction.
+	 */
+	if (latency_limit)
+	{
+		if (INSTR_TIME_IS_ZERO(*now))
+			INSTR_TIME_SET_CURRENT(*now);
+
+		if (INSTR_TIME_GET_MICROSEC(*now) - st->txn_scheduled >= latency_limit)
+			return false;
+	}
+
+	/* OK */
+	return true;
+}
+
+/*
+ * Process the conditional stack depending on the condition value; this is
+ * used for the meta commands \if and \elif.
+ */
+static void
+executeCondition(CState *st, bool condition)
+{
+	Command    *command = sql_script[st->use_file].commands[st->command];
+
+	/* execute or not depending on evaluated condition */
+	if (command->meta == META_IF)
+	{
+		conditional_stack_push(st->cstack,
+							   condition ? IFSTATE_TRUE : IFSTATE_FALSE);
+	}
+	else if (command->meta == META_ELIF)
+	{
+		/* we should get here only if the "elif" needed evaluation */
+		Assert(conditional_stack_peek(st->cstack) == IFSTATE_FALSE);
+		conditional_stack_poke(st->cstack,
+							   condition ? IFSTATE_TRUE : IFSTATE_FALSE);
+	}
+}
+
+/*
+ * Get the failure status from the error code.
+ */
+static FailureStatus
+getFailureStatus(char *sqlState)
+{
+	if (sqlState)
+	{
+		if (strcmp(sqlState, ERRCODE_T_R_SERIALIZATION_FAILURE) == 0)
+			return SERIALIZATION_FAILURE;
+		else if (strcmp(sqlState, ERRCODE_T_R_DEADLOCK_DETECTED) == 0)
+			return DEADLOCK_FAILURE;
+		else if (strcmp(sqlState, ERRCODE_IN_FAILED_SQL_TRANSACTION) == 0)
+			return IN_FAILED_SQL_TRANSACTION;
+	}
+
+	return ANOTHER_FAILURE;
+}
+
+/*
+ * If the latency limit is used, return the current transaction latency as a
+ * percentage of the latency limit. Otherwise return zero.
+ */
+static double
+getLatencyUsed(CState *st, instr_time *now)
+{
+	if (!latency_limit)
+		return 0;
+
+	if (INSTR_TIME_IS_ZERO(*now))
+		INSTR_TIME_SET_CURRENT(*now);
+
+	return (100.0 * (INSTR_TIME_GET_MICROSEC(*now) - st->txn_scheduled) /
+			latency_limit);
+}
+
+/*
  * Advance the state machine of a connection, if possible.
  */
 static void
@@ -2876,6 +3238,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 	instr_time	now;
 	bool		end_tx_processed = false;
 	int64		wait;
+	FailureStatus failure_status = NO_FAILURE;
 
 	/*
 	 * gettimeofday() isn't free, so we get the current timestamp lazily the
@@ -2916,6 +3279,11 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 					st->state = CSTATE_START_TX;
 				/* check consistency */
 				Assert(conditional_stack_empty(st->cstack));
+
+				/* reset transaction variables to default values */
+				st->first_failure.status = NO_FAILURE;
+				st->retries = 0;
+
 				break;
 
 				/*
@@ -2963,7 +3331,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						INSTR_TIME_SET_CURRENT(now);
 					now_us = INSTR_TIME_GET_MICROSEC(now);
 					while (thread->throttle_trigger < now_us - latency_limit &&
-						   (nxacts <= 0 || st->cnt < nxacts))
+						   (nxacts <= 0 || getTotalCnt(st) < nxacts))
 					{
 						processXactStats(thread, st, &now, true, agg);
 						/* next rendez-vous */
@@ -2973,7 +3341,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						st->txn_scheduled = thread->throttle_trigger;
 					}
 					/* stop client if -t exceeded */
-					if (nxacts > 0 && st->cnt >= nxacts)
+					if (nxacts > 0 && getTotalCnt(st) >= nxacts)
 					{
 						st->state = CSTATE_FINISHED;
 						break;
@@ -3015,7 +3383,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 					start = now;
 					if ((st->con = doConnect()) == NULL)
 					{
-						ereport(ELEVEL_LOG,
+						ereport(ELEVEL_LOG_CLIENT_ABORTED,
 								(errmsg("client %d aborted while establishing connection\n",
 										st->id)));
 						st->state = CSTATE_ABORTED;
@@ -3029,6 +3397,15 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				}
 
 				/*
+				 * This is the first try to run this transaction. Remember its
+				 * parameters in case it fails and we have to repeat it
+				 * later.
+				 */
+				copyRandomState(&st->retry_state.random_state,
+								&st->random_state);
+				copyVariables(&st->retry_state.variables, &st->variables);
+
+				/*
 				 * Record transaction start time under logging, progress or
 				 * throttling.
 				 */
@@ -3064,7 +3441,15 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 */
 				if (command == NULL)
 				{
-					st->state = CSTATE_END_TX;
+					if (st->first_failure.status == NO_FAILURE)
+					{
+						st->state = CSTATE_END_TX;
+					}
+					else
+					{
+						/* check if we can retry the failure */
+						st->state = CSTATE_RETRY;
+					}
 					break;
 				}
 
@@ -3072,7 +3457,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 * Record statement start time if per-command latencies are
 				 * requested
 				 */
-				if (is_latencies)
+				if (report_per_command)
 				{
 					if (INSTR_TIME_IS_ZERO(now))
 						INSTR_TIME_SET_CURRENT(now);
@@ -3083,7 +3468,8 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				{
 					if (!sendCommand(st, command))
 					{
-						commandFailed(st, "SQL", "SQL command send failed");
+						commandFailed(st, "SQL", "SQL command send failed",
+									  ELEVEL_LOG_CLIENT_ABORTED);
 						st->state = CSTATE_ABORTED;
 					}
 					else
@@ -3105,6 +3491,9 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 					ereport(ELEVEL_DEBUG, (errmsg("%s", errmsg_buf.data)));
 					termPQExpBuffer(&errmsg_buf);
 
+					/* will be changed if the meta command fails */
+					failure_status = NO_FAILURE;
+
 					if (command->meta == META_SLEEP)
 					{
 						/*
@@ -3118,8 +3507,11 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 
 						if (!evaluateSleep(&st->variables, argc, argv, &usec))
 						{
-							commandFailed(st, "sleep", "execution of meta-command failed");
-							st->state = CSTATE_ABORTED;
+							commandFailed(st, "sleep",
+										  "execution of meta-command failed",
+										  ELEVEL_LOG_CLIENT_FAIL);
+							failure_status = ANOTHER_FAILURE;
+							st->state = CSTATE_FAILURE;
 							break;
 						}
 
@@ -3150,8 +3542,18 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 
 						if (!evaluateExpr(thread, st, expr, &result))
 						{
-							commandFailed(st, argv[0], "evaluation of meta-command failed");
-							st->state = CSTATE_ABORTED;
+							commandFailed(st, argv[0],
+										  "evaluation of meta-command failed",
+										  ELEVEL_LOG_CLIENT_FAIL);
+
+							/*
+							 * Do not ruin the following conditional commands,
+							 * if any.
+							 */
+							executeCondition(st, false);
+
+							failure_status = ANOTHER_FAILURE;
+							st->state = CSTATE_FAILURE;
 							break;
 						}
 
@@ -3160,29 +3562,17 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 							if (!putVariableValue(&st->variables,  argv[0],
 												  argv[1], &result, true))
 							{
-								commandFailed(st, "set", "assignment of meta-command failed");
-								st->state = CSTATE_ABORTED;
+								commandFailed(st, "set",
+											  "assignment of meta-command failed",
+											  ELEVEL_LOG_CLIENT_FAIL);
+								failure_status = ANOTHER_FAILURE;
+								st->state = CSTATE_FAILURE;
 								break;
 							}
 						}
 						else	/* if and elif evaluated cases */
 						{
-							bool		cond = valueTruth(&result);
-
-							/* execute or not depending on evaluated condition */
-							if (command->meta == META_IF)
-							{
-								conditional_stack_push(st->cstack, cond ? IFSTATE_TRUE : IFSTATE_FALSE);
-							}
-							else	/* elif */
-							{
-								/*
-								 * we should get here only if the "elif"
-								 * needed evaluation
-								 */
-								Assert(conditional_stack_peek(st->cstack) == IFSTATE_FALSE);
-								conditional_stack_poke(st->cstack, cond ? IFSTATE_TRUE : IFSTATE_FALSE);
-							}
+							executeCondition(st, valueTruth(&result));
 						}
 					}
 					else if (command->meta == META_ELSE)
@@ -3222,8 +3612,11 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						}
 						else if (!ret)	/* on error */
 						{
-							commandFailed(st, "setshell", "execution of meta-command failed");
-							st->state = CSTATE_ABORTED;
+							commandFailed(st, "setshell",
+										  "execution of meta-command failed",
+										  ELEVEL_LOG_CLIENT_FAIL);
+							failure_status = ANOTHER_FAILURE;
+							st->state = CSTATE_FAILURE;
 							break;
 						}
 						else
@@ -3243,8 +3636,11 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 						}
 						else if (!ret)	/* on error */
 						{
-							commandFailed(st, "shell", "execution of meta-command failed");
-							st->state = CSTATE_ABORTED;
+							commandFailed(st, "shell",
+										  "execution of meta-command failed",
+										  ELEVEL_LOG_CLIENT_FAIL);
+							failure_status = ANOTHER_FAILURE;
+							st->state = CSTATE_FAILURE;
 							break;
 						}
 						else
@@ -3360,37 +3756,55 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 * Wait for the current SQL command to complete
 				 */
 			case CSTATE_WAIT_RESULT:
-				command = sql_script[st->use_file].commands[st->command];
-				ereport(ELEVEL_DEBUG,
-						(errmsg("client %d receiving\n", st->id)));
-				if (!PQconsumeInput(st->con))
-				{				/* there's something wrong */
-					commandFailed(st, "SQL", "perhaps the backend died while processing");
-					st->state = CSTATE_ABORTED;
-					break;
-				}
-				if (PQisBusy(st->con))
-					return;		/* don't have the whole result yet */
-
-				/*
-				 * Read and discard the query result;
-				 */
-				res = PQgetResult(st->con);
-				switch (PQresultStatus(res))
 				{
-					case PGRES_COMMAND_OK:
-					case PGRES_TUPLES_OK:
-					case PGRES_EMPTY_QUERY:
-						/* OK */
-						PQclear(res);
-						discard_response(st);
-						st->state = CSTATE_END_COMMAND;
-						break;
-					default:
-						commandFailed(st, "SQL", PQerrorMessage(st->con));
-						PQclear(res);
+					char	   *sqlState;
+
+					command = sql_script[st->use_file].commands[st->command];
+					ereport(ELEVEL_DEBUG,
+							(errmsg("client %d receiving\n", st->id)));
+					if (!PQconsumeInput(st->con))
+					{				/* there's something wrong */
+						commandFailed(st, "SQL",
+									  "perhaps the backend died while processing",
+									  ELEVEL_LOG_CLIENT_ABORTED);
 						st->state = CSTATE_ABORTED;
 						break;
+					}
+					if (PQisBusy(st->con))
+						return;		/* don't have the whole result yet */
+
+					/*
+					 * Read and discard the query result.
+					 */
+					res = PQgetResult(st->con);
+					sqlState = PQresultErrorField(res, PG_DIAG_SQLSTATE);
+					switch (PQresultStatus(res))
+					{
+						case PGRES_COMMAND_OK:
+						case PGRES_TUPLES_OK:
+						case PGRES_EMPTY_QUERY:
+							/* OK */
+							PQclear(res);
+							discard_response(st);
+							failure_status = NO_FAILURE;
+							st->state = CSTATE_END_COMMAND;
+							break;
+						case PGRES_NONFATAL_ERROR:
+						case PGRES_FATAL_ERROR:
+							failure_status = getFailureStatus(sqlState);
+							commandFailed(st, "SQL", PQerrorMessage(st->con),
+										  ELEVEL_LOG_CLIENT_FAIL);
+							PQclear(res);
+							discard_response(st);
+							st->state = CSTATE_FAILURE;
+							break;
+						default:
+							commandFailed(st, "SQL", PQerrorMessage(st->con),
+										  ELEVEL_LOG_CLIENT_ABORTED);
+							PQclear(res);
+							st->state = CSTATE_ABORTED;
+							break;
+					}
 				}
 				break;
 
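The failure status recorded above comes from classifying the SQLSTATE of the failed result. As a rough sketch of that classification only (the patch's real getFailureStatus() is defined elsewhere in this file and may differ; the SERIALIZATION_FAILURE and DEADLOCK_FAILURE member names are assumptions, while NO_FAILURE, IN_FAILED_SQL_TRANSACTION and ANOTHER_FAILURE appear in this hunk, and the SQLSTATE codes are the standard PostgreSQL error codes):

#include <string.h>

/* Failure categories; the *_FAILURE names for retryable errors are assumed. */
typedef enum FailureStatus
{
	NO_FAILURE,
	SERIALIZATION_FAILURE,		/* SQLSTATE 40001, can be retried */
	DEADLOCK_FAILURE,			/* SQLSTATE 40P01, can be retried */
	IN_FAILED_SQL_TRANSACTION,	/* SQLSTATE 25P02 */
	ANOTHER_FAILURE
} FailureStatus;

static FailureStatus
classifyFailure(const char *sqlState)
{
	if (sqlState != NULL)
	{
		if (strcmp(sqlState, "40001") == 0)
			return SERIALIZATION_FAILURE;
		if (strcmp(sqlState, "40P01") == 0)
			return DEADLOCK_FAILURE;
		if (strcmp(sqlState, "25P02") == 0)
			return IN_FAILED_SQL_TRANSACTION;
	}
	return ANOTHER_FAILURE;
}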
@@ -3419,7 +3833,7 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				 * in thread-local data structure, if per-command latencies
 				 * are requested.
 				 */
-				if (is_latencies)
+				if (report_per_command)
 				{
 					if (INSTR_TIME_IS_ZERO(now))
 						INSTR_TIME_SET_CURRENT(now);
@@ -3438,6 +3852,139 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 				break;
 
 				/*
+				 * Remember the failure and go ahead with the next command.
+				 */
+			case CSTATE_FAILURE:
+
+				Assert(failure_status != NO_FAILURE);
+
+				/*
+				 * Whether subsequent failures are counted as "retried" or
+				 * "failed" depends on whether the first failure of this
+				 * transaction can be retried. Therefore remember only the
+				 * first failure.
+				 */
+				if (st->first_failure.status == NO_FAILURE)
+				{
+					st->first_failure.status = failure_status;
+					st->first_failure.command = st->command;
+				}
+
+				/* Go ahead with the next command, to be executed or skipped */
+				st->command++;
+				st->state = conditional_active(st->cstack) ?
+					CSTATE_START_COMMAND : CSTATE_SKIP_COMMAND;
+				break;
+
+			/*
+			 * Retry the failed transaction if possible.
+			 */
+			case CSTATE_RETRY:
+				{
+					PQExpBufferData errmsg_buf;
+
+					command = sql_script[st->use_file].commands[st->first_failure.command];
+
+					if (canRetry(st, &now))
+					{
+						/*
+						 * The failed transaction will be retried, so count this
+						 * retry.
+						 */
+						st->retries++;
+						command->retries++;
+
+						/*
+						 * Report this at the same level as failures to indicate
+						 * that the failed transaction will be retried.
+						 */
+						initPQExpBuffer(&errmsg_buf);
+						printfPQExpBuffer(&errmsg_buf,
+										  "client %d repeats the failed transaction (try %d",
+										  st->id, st->retries + 1);
+						if (max_tries)
+							appendPQExpBuffer(&errmsg_buf, "/%d", max_tries);
+						if (latency_limit)
+						{
+							appendPQExpBuffer(&errmsg_buf,
+											  ", %.3f%% of the maximum time of tries was used",
+											  getLatencyUsed(st, &now));
+						}
+						appendPQExpBufferStr(&errmsg_buf, ")\n");
+						ereport(ELEVEL_LOG_CLIENT_FAIL,
+								(errmsg("%s", errmsg_buf.data)));
+						termPQExpBuffer(&errmsg_buf);
+
+						/*
+						 * Reset the execution parameters to what they were at
+						 * the beginning of the transaction.
+						 */
+						copyRandomState(&st->random_state,
+										&st->retry_state.random_state);
+						copyVariables(&st->variables, &st->retry_state.variables);
+
+						/* Process the first transaction command */
+						st->command = 0;
+						st->first_failure.status = NO_FAILURE;
+						st->state = CSTATE_START_COMMAND;
+					}
+					else
+					{
+						/*
+						 * This failed transaction cannot be retried, so count
+						 * the error.
+						 */
+						command->errors++;
+						if (st->first_failure.status ==
+							IN_FAILED_SQL_TRANSACTION)
+							command->errors_in_failed_tx++;
+
+						/*
+						 * Report this at the same level as failures to indicate
+						 * that the failed transaction will not be retried.
+						 */
+						initPQExpBuffer(&errmsg_buf);
+						printfPQExpBuffer(&errmsg_buf,
+										  "client %d ends the failed transaction (try %d",
+										  st->id, st->retries + 1);
+
+						/*
+						 * Report the actual number and/or time of tries. This
+						 * information is not needed if this type of failure can
+						 * never be retried.
+						 */
+						if (canRetryFailure(st->first_failure.status))
+						{
+							if (max_tries)
+							{
+								appendPQExpBuffer(&errmsg_buf, "/%d",
+												  max_tries);
+							}
+							if (latency_limit)
+							{
+								appendPQExpBuffer(&errmsg_buf,
+												  ", %.3f%% of the maximum time of tries was used",
+												  getLatencyUsed(st, &now));
+							}
+						}
+						appendPQExpBufferStr(&errmsg_buf, ")\n");
+						ereport(ELEVEL_LOG_CLIENT_FAIL,
+								(errmsg("%s", errmsg_buf.data)));
+						termPQExpBuffer(&errmsg_buf);
+
+						/*
+						 * Reset the execution parameters to what they were at
+						 * the beginning of the transaction, except for the
+						 * random state.
+						 */
+						copyVariables(&st->variables, &st->retry_state.variables);
+
+						/* End the failed transaction */
+						st->state = CSTATE_END_TX;
+					}
+				}
+				break;
+
+				/*
 				 * End of transaction.
 				 */
 			case CSTATE_END_TX:
@@ -3458,7 +4005,8 @@ doCustom(TState *thread, CState *st, StatsData *agg)
 					INSTR_TIME_SET_ZERO(now);
 				}
 
-				if ((st->cnt >= nxacts && duration <= 0) || timer_exceeded)
+				if ((getTotalCnt(st) >= nxacts && duration <= 0) ||
+					timer_exceeded)
 				{
 					/* exit success */
 					st->state = CSTATE_FINISHED;
@@ -3534,13 +4082,15 @@ doLog(TState *thread, CState *st,
 		while (agg->start_time + agg_interval <= now)
 		{
 			/* print aggregated report to logfile */
-			fprintf(logfile, "%ld " INT64_FORMAT " %.0f %.0f %.0f %.0f",
+			fprintf(logfile, "%ld " INT64_FORMAT " %.0f %.0f %.0f %.0f " INT64_FORMAT " " INT64_FORMAT,
 					(long) agg->start_time,
 					agg->cnt,
 					agg->latency.sum,
 					agg->latency.sum2,
 					agg->latency.min,
-					agg->latency.max);
+					agg->latency.max,
+					agg->errors,
+					agg->errors_in_failed_tx);
 			if (throttle_delay)
 			{
 				fprintf(logfile, " %.0f %.0f %.0f %.0f",
@@ -3551,6 +4101,10 @@ doLog(TState *thread, CState *st,
 				if (latency_limit)
 					fprintf(logfile, " " INT64_FORMAT, agg->skipped);
 			}
+			if (max_tries > 1 || latency_limit)
+				fprintf(logfile, " " INT64_FORMAT " " INT64_FORMAT,
+						agg->retried,
+						agg->retries);
 			fputc('\n', logfile);
 
 			/* reset data and move to next interval */
@@ -3558,7 +4112,8 @@ doLog(TState *thread, CState *st,
 		}
 
 		/* accumulate the current transaction */
-		accumStats(agg, skipped, latency, lag);
+		accumStats(agg, skipped, latency, lag, st->first_failure.status,
+				   st->retries);
 	}
 	else
 	{
@@ -3568,14 +4123,25 @@ doLog(TState *thread, CState *st,
 		gettimeofday(&tv, NULL);
 		if (skipped)
 			fprintf(logfile, "%d " INT64_FORMAT " skipped %d %ld %ld",
-					st->id, st->cnt, st->use_file,
+					st->id, getTotalCnt(st), st->use_file,
 					(long) tv.tv_sec, (long) tv.tv_usec);
-		else
+		else if (st->first_failure.status == NO_FAILURE)
 			fprintf(logfile, "%d " INT64_FORMAT " %.0f %d %ld %ld",
-					st->id, st->cnt, latency, st->use_file,
+					st->id, getTotalCnt(st), latency, st->use_file,
+					(long) tv.tv_sec, (long) tv.tv_usec);
+		else if (st->first_failure.status == IN_FAILED_SQL_TRANSACTION)
+			fprintf(logfile, "%d " INT64_FORMAT " in_failed_tx %d %ld %ld",
+					st->id, getTotalCnt(st), st->use_file,
 					(long) tv.tv_sec, (long) tv.tv_usec);
+		else
+			fprintf(logfile, "%d " INT64_FORMAT " failed %d %ld %ld",
+					st->id, getTotalCnt(st), st->use_file,
+					(long) tv.tv_sec, (long) tv.tv_usec);
+
 		if (throttle_delay)
 			fprintf(logfile, " %.0f", lag);
+		if (max_tries > 1 || latency_limit)
+			fprintf(logfile, " %d", st->retries);
 		fputc('\n', logfile);
 	}
 }
@@ -3595,7 +4161,8 @@ processXactStats(TState *thread, CState *st, instr_time *now,
 	bool		thread_details = progress || throttle_delay || latency_limit,
 				detailed = thread_details || use_log || per_script_stats;
 
-	if (detailed && !skipped)
+	if (detailed && !skipped &&
+		(st->first_failure.status == NO_FAILURE || latency_limit))
 	{
 		if (INSTR_TIME_IS_ZERO(*now))
 			INSTR_TIME_SET_CURRENT(*now);
@@ -3608,7 +4175,8 @@ processXactStats(TState *thread, CState *st, instr_time *now,
 	if (thread_details)
 	{
 		/* keep detailed thread stats */
-		accumStats(&thread->stats, skipped, latency, lag);
+		accumStats(&thread->stats, skipped, latency, lag,
+				   st->first_failure.status, st->retries);
 
 		/* count transactions over the latency limit, if needed */
 		if (latency_limit && latency > latency_limit)
@@ -3616,19 +4184,24 @@ processXactStats(TState *thread, CState *st, instr_time *now,
 	}
 	else
 	{
-		/* no detailed stats, just count */
-		thread->stats.cnt++;
+		/* no detailed stats */
+		accumStats(&thread->stats, skipped, 0, 0, st->first_failure.status,
+				   st->retries);
 	}
 
 	/* client stat is just counting */
-	st->cnt++;
+	if (st->first_failure.status == NO_FAILURE)
+		st->cnt++;
+	else
+		st->ecnt++;
 
 	if (use_log)
 		doLog(thread, st, agg, skipped, latency, lag);
 
 	/* XXX could use a mutex here, but we choose not to */
 	if (per_script_stats)
-		accumStats(&sql_script[st->use_file].stats, skipped, latency, lag);
+		accumStats(&sql_script[st->use_file].stats, skipped, latency, lag,
+				   st->first_failure.status, st->retries);
 }
 
 
@@ -3648,7 +4221,7 @@ disconnect_all(CState *state, int length)
 static void
 initDropTables(PGconn *con)
 {
-	ereport(ELEVEL_LOG, (errmsg("dropping old tables...\n")));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("dropping old tables...\n")));
 
 	/*
 	 * We drop all the tables in one command, so that whether there are
@@ -3723,7 +4296,7 @@ initCreateTables(PGconn *con)
 	};
 	int			i;
 
-	ereport(ELEVEL_LOG, (errmsg("creating tables...\n")));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("creating tables...\n")));
 
 	for (i = 0; i < lengthof(DDLs); i++)
 	{
@@ -3776,7 +4349,7 @@ initGenerateData(PGconn *con)
 				remaining_sec;
 	int			log_interval = 1;
 
-	ereport(ELEVEL_LOG, (errmsg("generating data...\n")));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("generating data...\n")));
 
 	/*
 	 * we do all of this in one transaction to enable the backend's
@@ -3849,7 +4422,7 @@ initGenerateData(PGconn *con)
 			elapsed_sec = INSTR_TIME_GET_DOUBLE(diff);
 			remaining_sec = ((double) scale * naccounts - j) * elapsed_sec / j;
 
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg(INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s)\n",
 							j, (int64) naccounts * scale,
 							(int) (((int64) j * 100) /
@@ -3868,7 +4441,7 @@ initGenerateData(PGconn *con)
 			/* have we reached the next interval (or end)? */
 			if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS))
 			{
-				ereport(ELEVEL_LOG,
+				ereport(ELEVEL_LOG_MAIN,
 						(errmsg(INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s)\n",
 								j, (int64) naccounts * scale,
 								(int) (((int64) j * 100) /
@@ -3895,7 +4468,7 @@ initGenerateData(PGconn *con)
 static void
 initVacuum(PGconn *con)
 {
-	ereport(ELEVEL_LOG, (errmsg("vacuuming...\n")));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("vacuuming...\n")));
 	executeStatement(con, "vacuum analyze pgbench_branches");
 	executeStatement(con, "vacuum analyze pgbench_tellers");
 	executeStatement(con, "vacuum analyze pgbench_accounts");
@@ -3915,7 +4488,7 @@ initCreatePKeys(PGconn *con)
 	};
 	int			i;
 
-	ereport(ELEVEL_LOG, (errmsg("creating primary keys...\n")));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("creating primary keys...\n")));
 	for (i = 0; i < lengthof(DDLINDEXes); i++)
 	{
 		char		buffer[256];
@@ -3952,7 +4525,7 @@ initCreateFKeys(PGconn *con)
 	};
 	int			i;
 
-	ereport(ELEVEL_LOG, (errmsg("creating foreign keys...\n")));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("creating foreign keys...\n")));
 	for (i = 0; i < lengthof(DDLKEYs); i++)
 	{
 		executeStatement(con, DDLKEYs[i]);
@@ -4023,7 +4596,7 @@ runInitSteps(const char *initialize_steps)
 			case ' ':
 				break;			/* ignore */
 			default:
-				ereport(ELEVEL_LOG,
+				ereport(ELEVEL_LOG_MAIN,
 						(errmsg("unrecognized initialization step \"%c\"\n",
 								*step)));
 				PQfinish(con);
@@ -4031,7 +4604,7 @@ runInitSteps(const char *initialize_steps)
 		}
 	}
 
-	ereport(ELEVEL_LOG, (errmsg("done.\n")));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("done.\n")));
 	PQfinish(con);
 }
 
@@ -4069,7 +4642,7 @@ parseQuery(Command *cmd)
 
 		if (cmd->argc >= MAX_ARGS)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg("statement has too many arguments (maximum is %d): %s\n",
 							MAX_ARGS - 1, cmd->argv[0])));
 			pg_free(name);
@@ -4109,7 +4682,7 @@ pgbench_error(const char *fmt,...)
 		va_end(ap);
 	} while (!done);
 
-	ereport(ELEVEL_LOG, (errmsg("%s", errmsg_buf.data)));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("%s", errmsg_buf.data)));
 	termPQExpBuffer(&errmsg_buf);
 }
 
@@ -4154,7 +4727,7 @@ syntax_error(const char *source, int lineno,
 		}
 	}
 
-	ereport(ELEVEL_LOG, (errmsg("%s", errmsg_buf.data)));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("%s", errmsg_buf.data)));
 	termPQExpBuffer(&errmsg_buf);
 	exit(1);
 }
@@ -4648,7 +5221,7 @@ listAvailableScripts(void)
 		appendPQExpBuffer(&errmsg_buf, "\t%s\n", builtin_script[i].name);
 	appendPQExpBufferChar(&errmsg_buf, '\n');
 
-	ereport(ELEVEL_LOG, (errmsg("%s", errmsg_buf.data)));
+	ereport(ELEVEL_LOG_MAIN, (errmsg("%s", errmsg_buf.data)));
 	termPQExpBuffer(&errmsg_buf);
 }
 
@@ -4677,12 +5250,12 @@ findBuiltin(const char *name)
 	/* error cases */
 	if (found == 0)
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_MAIN,
 				(errmsg("no builtin script found for name \"%s\"\n", name)));
 	}
 	else
 	{						/* found > 1 */
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_MAIN,
 				(errmsg("ambiguous builtin name: %d builtin scripts found for prefix \"%s\"\n",
 						found, name)));
 	}
@@ -4782,7 +5355,8 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 	double		time_include,
 				tps_include,
 				tps_exclude;
-	int64		ntx = total->cnt - total->skipped;
+	int64		ntx = total->cnt - total->skipped,
+				total_ntx = total->cnt + total->errors;
 	int			i,
 				totalCacheOverflows = 0;
 
@@ -4803,8 +5377,8 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 	if (duration <= 0)
 	{
 		printf("number of transactions per client: %d\n", nxacts);
-		printf("number of transactions actually processed: " INT64_FORMAT "/%d\n",
-			   ntx, nxacts * nclients);
+		printf("number of transactions actually processed: " INT64_FORMAT "/" INT64_FORMAT "\n",
+			   ntx, total_ntx);
 	}
 	else
 	{
@@ -4812,6 +5386,43 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 		printf("number of transactions actually processed: " INT64_FORMAT "\n",
 			   ntx);
 	}
+
+	if (total->errors > 0)
+		printf("number of errors: " INT64_FORMAT " (%.3f%%)\n",
+			   total->errors, 100.0 * total->errors / total_ntx);
+
+	if (total->errors_in_failed_tx > 0)
+		printf("number of errors \"in failed SQL transaction\": " INT64_FORMAT " (%.3f%%)\n",
+			   total->errors_in_failed_tx,
+			   100.0 * total->errors_in_failed_tx / total_ntx);
+
+	/*
+	 * The number of retried transactions can be non-zero only if max_tries is
+	 * greater than one or latency_limit is used.
+	 */
+	if (total->retried > 0)
+	{
+		printf("number of retried: " INT64_FORMAT " (%.3f%%)\n",
+			   total->retried, 100.0 * total->retried / total_ntx);
+		printf("number of retries: " INT64_FORMAT "\n", total->retries);
+	}
+
+	if (max_tries)
+		printf("maximum number of tries: %d\n", max_tries);
+
+	if (latency_limit)
+	{
+		printf("number of transactions above the %.1f ms latency limit: " INT64_FORMAT "/" INT64_FORMAT " (%.3f %%)",
+			   latency_limit / 1000.0, latency_late, total_ntx,
+			   (total_ntx > 0) ? 100.0 * latency_late / total_ntx : 0.0);
+
+		/* this statistic includes both successful and failed transactions */
+		if (total->errors > 0)
+			printf(" (including errors)");
+
+		printf("\n");
+	}
+
 	/* Report zipfian cache overflow */
 	for (i = 0; i < nthreads; i++)
 	{
@@ -4831,18 +5442,19 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 			   total->skipped,
 			   100.0 * total->skipped / total->cnt);
 
-	if (latency_limit)
-		printf("number of transactions above the %.1f ms latency limit: " INT64_FORMAT "/" INT64_FORMAT " (%.3f %%)\n",
-			   latency_limit / 1000.0, latency_late, ntx,
-			   (ntx > 0) ? 100.0 * latency_late / ntx : 0.0);
-
 	if (throttle_delay || progress || latency_limit)
 		printSimpleStats("latency", &total->latency);
 	else
 	{
 		/* no measurement, show average latency computed from run time */
-		printf("latency average = %.3f ms\n",
-			   1000.0 * time_include * nclients / total->cnt);
+		printf("latency average = %.3f ms",
+			   1000.0 * time_include * nclients / total_ntx);
+
+		/* this statistic includes both successful and failed transactions */
+		if (total->errors > 0)
+			printf(" (including errors)");
+
+		printf("\n");
 	}
 
 	if (throttle_delay)
@@ -4861,7 +5473,7 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 	printf("tps = %f (excluding connections establishing)\n", tps_exclude);
 
 	/* Report per-script/command statistics */
-	if (per_script_stats || is_latencies)
+	if (per_script_stats || report_per_command)
 	{
 		int			i;
 
@@ -4870,6 +5482,7 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 			if (per_script_stats)
 			{
 				StatsData  *sstats = &sql_script[i].stats;
+				int64		script_total_ntx = sstats->cnt + sstats->errors;
 
 				printf("SQL script %d: %s\n"
 					   " - weight: %d (targets %.1f%% of total)\n"
@@ -4878,9 +5491,33 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 					   sql_script[i].weight,
 					   100.0 * sql_script[i].weight / total_weight,
 					   sstats->cnt,
-					   100.0 * sstats->cnt / total->cnt,
+					   100.0 * sstats->cnt / script_total_ntx,
 					   (sstats->cnt - sstats->skipped) / time_include);
 
+				if (total->errors > 0)
+					printf(" - number of errors: " INT64_FORMAT " (%.3f%%)\n",
+						   sstats->errors,
+						   100.0 * sstats->errors / script_total_ntx);
+
+				if (total->errors_in_failed_tx > 0)
+					printf(" - number of errors \"in failed SQL transaction\": " INT64_FORMAT " (%.3f%%)\n",
+						   sstats->errors_in_failed_tx,
+						   (100.0 * sstats->errors_in_failed_tx /
+							script_total_ntx));
+
+				/*
+				 * The number of retried transactions can be non-zero only if
+				 * max_tries is greater than one or latency_limit is used.
+				 */
+				if (total->retried > 0)
+				{
+					printf(" - number of retried: " INT64_FORMAT " (%.3f%%)\n",
+						   sstats->retried,
+						   100.0 * sstats->retried / script_total_ntx);
+					printf(" - number of retries: " INT64_FORMAT "\n",
+						   sstats->retries);
+				}
+
 				if (throttle_delay && latency_limit && sstats->cnt > 0)
 					printf(" - number of transactions skipped: " INT64_FORMAT " (%.3f%%)\n",
 						   sstats->skipped,
@@ -4889,15 +5526,33 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 				printSimpleStats(" - latency", &sstats->latency);
 			}
 
-			/* Report per-command latencies */
-			if (is_latencies)
+			/* Report per-command latencies and errors */
+			if (report_per_command)
 			{
 				Command   **commands;
 
 				if (per_script_stats)
-					printf(" - statement latencies in milliseconds:\n");
+					printf(" - statement latencies in milliseconds");
 				else
-					printf("statement latencies in milliseconds:\n");
+					printf("statement latencies in milliseconds");
+
+				if (total->errors > 0)
+				{
+					printf("%s errors",
+						   ((total->errors_in_failed_tx == 0 &&
+							total->retried == 0) ?
+							" and" : ","));
+				}
+				if (total->errors_in_failed_tx > 0)
+				{
+					printf("%s errors \"in failed SQL transaction\"",
+						   total->retried == 0 ? " and" : ",");
+				}
+				if (total->retried > 0)
+				{
+					printf(" and retries");
+				}
+				printf(":\n");
 
 				for (commands = sql_script[i].commands;
 					 *commands != NULL;
@@ -4905,10 +5560,25 @@ printResults(TState *threads, StatsData *total, instr_time total_time,
 				{
 					SimpleStats *cstats = &(*commands)->stats;
 
-					printf("   %11.3f  %s\n",
+					printf("   %11.3f",
 						   (cstats->count > 0) ?
-						   1000.0 * cstats->sum / cstats->count : 0.0,
-						   (*commands)->line);
+						   1000.0 * cstats->sum / cstats->count : 0.0);
+					if (total->errors > 0)
+					{
+						printf("  %20" INT64_MODIFIER "d",
+							   (*commands)->errors);
+					}
+					if (total->errors_in_failed_tx > 0)
+					{
+						printf("  %20" INT64_MODIFIER "d",
+							   (*commands)->errors_in_failed_tx);
+					}
+					if (total->retried > 0)
+					{
+						printf("  %20" INT64_MODIFIER "d",
+							   (*commands)->retries);
+					}
+					printf("  %s\n", (*commands)->line);
 				}
 			}
 		}
@@ -4937,7 +5607,7 @@ set_random_seed(const char *seed)
 		if (!pg_strong_random(&iseed, sizeof(iseed)))
 #endif
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg("cannot seed random from a strong source, none available: use \"time\" or an unsigned integer value.\n")));
 			return false;
 		}
@@ -4949,7 +5619,7 @@ set_random_seed(const char *seed)
 
 		if (sscanf(seed, "%u%c", &iseed, &garbage) != 1)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg("unrecognized random seed option \"%s\": expecting an unsigned integer, \"time\" or \"rand\"\n",
 							seed)));
 			return false;
@@ -4958,7 +5628,7 @@ set_random_seed(const char *seed)
 
 	if (seed != NULL)
 	{
-		ereport(ELEVEL_LOG,
+		ereport(ELEVEL_LOG_MAIN,
 				(errmsg("setting random seed to %u\n", iseed)));
 	}
 	srandom(iseed);
@@ -4987,7 +5657,7 @@ main(int argc, char **argv)
 		{"builtin", required_argument, NULL, 'b'},
 		{"client", required_argument, NULL, 'c'},
 		{"connect", no_argument, NULL, 'C'},
-		{"debug", no_argument, NULL, 'd'},
+		{"debug", required_argument, NULL, 'd'},
 		{"define", required_argument, NULL, 'D'},
 		{"file", required_argument, NULL, 'f'},
 		{"fillfactor", required_argument, NULL, 'F'},
@@ -5002,7 +5672,7 @@ main(int argc, char **argv)
 		{"progress", required_argument, NULL, 'P'},
 		{"protocol", required_argument, NULL, 'M'},
 		{"quiet", no_argument, NULL, 'q'},
-		{"report-latencies", no_argument, NULL, 'r'},
+		{"report-per-command", no_argument, NULL, 'r'},
 		{"rate", required_argument, NULL, 'R'},
 		{"scale", required_argument, NULL, 's'},
 		{"select-only", no_argument, NULL, 'S'},
@@ -5021,6 +5691,7 @@ main(int argc, char **argv)
 		{"log-prefix", required_argument, NULL, 7},
 		{"foreign-keys", no_argument, NULL, 8},
 		{"random-seed", required_argument, NULL, 9},
+		{"max-tries", required_argument, NULL, 10},
 		{NULL, 0, NULL, 0}
 	};
 
@@ -5096,7 +5767,7 @@ main(int argc, char **argv)
 				(errmsg("error while setting random seed from PGBENCH_RANDOM_SEED environment variable\n")));
 	}
 
-	while ((c = getopt_long(argc, argv, "iI:h:nvp:dqb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
+	while ((c = getopt_long(argc, argv, "iI:h:nvp:d:qb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
 	{
 		char	   *script;
 
@@ -5126,8 +5797,22 @@ main(int argc, char **argv)
 				pgport = pg_strdup(optarg);
 				break;
 			case 'd':
-				debug++;
-				break;
+				{
+					for (debug_level = 0;
+						 debug_level < NUM_DEBUGLEVEL;
+						 debug_level++)
+					{
+						if (strcmp(optarg, DEBUGLEVEL[debug_level]) == 0)
+							break;
+					}
+					if (debug_level >= NUM_DEBUGLEVEL)
+					{
+						ereport(ELEVEL_FATAL,
+								(errmsg("invalid debug level (-d): \"%s\"\n",
+										optarg)));
+					}
+					break;
+				}
 			case 'c':
 				benchmarking_option_set = true;
 				nclients = atoi(optarg);
@@ -5180,7 +5865,7 @@ main(int argc, char **argv)
 				break;
 			case 'r':
 				benchmarking_option_set = true;
-				is_latencies = true;
+				report_per_command = true;
 				break;
 			case 's':
 				scale_given = true;
@@ -5379,6 +6064,20 @@ main(int argc, char **argv)
 							(errmsg("error while setting random seed from --random-seed option\n")));
 				}
 				break;
+			case 10:			/* max-tries */
+				{
+					int32		max_tries_arg = atoi(optarg);
+
+					if (max_tries_arg <= 0)
+					{
+						ereport(ELEVEL_FATAL,
+								(errmsg("invalid number of maximum tries: \"%s\"\n",
+										optarg)));
+					}
+					benchmarking_option_set = true;
+					max_tries = (uint32) max_tries_arg;
+				}
+				break;
 			default:
 				ereport(ELEVEL_FATAL,
 						(errmsg(_("Try \"%s --help\" for more information.\n"),
@@ -5551,6 +6250,10 @@ main(int argc, char **argv)
 				(errmsg("--progress-timestamp is allowed only under --progress\n")));
 	}
 
+	/* If necessary, set the default maximum number of tries */
+	if (!max_tries && !latency_limit)
+		max_tries = 1;
+
 	/*
 	 * save main process id in the global variable because process id will be
 	 * changed after fork.
@@ -5635,7 +6338,7 @@ main(int argc, char **argv)
 								  PQdb(con));
 			}
 
-			ereport(ELEVEL_LOG, (errmsg("%s", errmsg_buf.data)));
+			ereport(ELEVEL_LOG_MAIN, (errmsg("%s", errmsg_buf.data)));
 			termPQExpBuffer(&errmsg_buf);
 			exit(1);
 		}
@@ -5650,7 +6353,7 @@ main(int argc, char **argv)
 
 		/* warn if we override user-given -s switch */
 		if (scale_given)
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg("scale option ignored, using count from pgbench_branches table (%d)\n",
 							scale)));
 	}
@@ -5702,18 +6405,18 @@ main(int argc, char **argv)
 
 	if (!is_no_vacuum)
 	{
-		ereport(ELEVEL_LOG, (errmsg("starting vacuum...")));
+		ereport(ELEVEL_LOG_MAIN, (errmsg("starting vacuum...")));
 		tryExecuteStatement(con, "vacuum pgbench_branches");
 		tryExecuteStatement(con, "vacuum pgbench_tellers");
 		tryExecuteStatement(con, "truncate pgbench_history");
-		ereport(ELEVEL_LOG, (errmsg("end.\n")));
+		ereport(ELEVEL_LOG_MAIN, (errmsg("end.\n")));
 
 		if (do_vacuum_accounts)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg("starting vacuum pgbench_accounts...")));
 			tryExecuteStatement(con, "vacuum analyze pgbench_accounts");
-			ereport(ELEVEL_LOG, (errmsg("end.\n")));
+			ereport(ELEVEL_LOG_MAIN, (errmsg("end.\n")));
 		}
 	}
 	PQfinish(con);
@@ -5813,6 +6516,10 @@ main(int argc, char **argv)
 		mergeSimpleStats(&stats.lag, &thread->stats.lag);
 		stats.cnt += thread->stats.cnt;
 		stats.skipped += thread->stats.skipped;
+		stats.retries += thread->stats.retries;
+		stats.retried += thread->stats.retried;
+		stats.errors += thread->stats.errors;
+		stats.errors_in_failed_tx += thread->stats.errors_in_failed_tx;
 		latency_late += thread->latency_late;
 		INSTR_TIME_ADD(conn_total_time, thread->conn_time);
 	}
@@ -5882,7 +6589,7 @@ threadRun(void *arg)
 
 		if (thread->logfile == NULL)
 		{
-			ereport(ELEVEL_LOG,
+			ereport(ELEVEL_LOG_MAIN,
 					(errmsg("could not open logfile \"%s\": %s\n",
 							logpath, strerror(errno))));
 			goto done;
@@ -5962,7 +6669,7 @@ threadRun(void *arg)
 
 				if (sock < 0)
 				{
-					ereport(ELEVEL_LOG,
+					ereport(ELEVEL_LOG_MAIN,
 							(errmsg("invalid socket: %s",
 									PQerrorMessage(st->con))));
 					goto done;
@@ -6040,7 +6747,7 @@ threadRun(void *arg)
 					continue;
 				}
 				/* must be something wrong */
-				ereport(ELEVEL_LOG,
+				ereport(ELEVEL_LOG_MAIN,
 						(errmsg("select() failed: %s\n", strerror(errno))));
 				goto done;
 			}
@@ -6065,7 +6772,7 @@ threadRun(void *arg)
 
 				if (sock < 0)
 				{
-					ereport(ELEVEL_LOG,
+					ereport(ELEVEL_LOG_MAIN,
 							(errmsg("invalid socket: %s",
 									PQerrorMessage(st->con))));
 					goto done;
@@ -6101,7 +6808,11 @@ threadRun(void *arg)
 				/* generate and show report */
 				StatsData	cur;
 				int64		run = now - last_report,
-							ntx;
+							ntx,
+							retries,
+							retried,
+							errors,
+							errors_in_failed_tx;
 				double		tps,
 							total_run,
 							latency,
@@ -6129,6 +6840,11 @@ threadRun(void *arg)
 					mergeSimpleStats(&cur.lag, &thread[i].stats.lag);
 					cur.cnt += thread[i].stats.cnt;
 					cur.skipped += thread[i].stats.skipped;
+					cur.retries += thread[i].stats.retries;
+					cur.retried += thread[i].stats.retried;
+					cur.errors += thread[i].stats.errors;
+					cur.errors_in_failed_tx +=
+						thread[i].stats.errors_in_failed_tx;
 				}
 
 				/* we count only actually executed transactions */
@@ -6146,6 +6862,11 @@ threadRun(void *arg)
 				{
 					latency = sqlat = stdev = lag = 0;
 				}
+				retries = cur.retries - last.retries;
+				retried = cur.retried - last.retried;
+				errors = cur.errors - last.errors;
+				errors_in_failed_tx = cur.errors_in_failed_tx -
+					last.errors_in_failed_tx;
 
 				if (progress_timestamp)
 				{
@@ -6172,6 +6893,16 @@ threadRun(void *arg)
 								  "progress: %s, %.1f tps, lat %.3f ms stddev %.3f",
 								  tbuf, tps, latency, stdev);
 
+				if (errors > 0)
+				{
+					appendPQExpBuffer(&progress_buf,
+									  ", " INT64_FORMAT " failed" , errors);
+					if (errors_in_failed_tx > 0)
+						appendPQExpBuffer(&progress_buf,
+										  " (" INT64_FORMAT " in failed tx)",
+										  errors_in_failed_tx);
+				}
+
 				if (throttle_delay)
 				{
 					appendPQExpBuffer(&progress_buf, ", lag %.3f ms", lag);
@@ -6180,9 +6911,20 @@ threadRun(void *arg)
 										  ", " INT64_FORMAT " skipped",
 										  cur.skipped - last.skipped);
 				}
+
+				/*
+				 * The number of retried transactions can be non-zero only if
+				 * max_tries is greater than one or latency_limit is used.
+				 */
+				if (retried > 0)
+				{
+					appendPQExpBuffer(&progress_buf,
+									  ", " INT64_FORMAT " retried, " INT64_FORMAT " retries",
+									  retried, retries);
+				}
 				appendPQExpBufferChar(&progress_buf, '\n');
 
-				ereport(ELEVEL_LOG, (errmsg("%s", progress_buf.data)));
+				ereport(ELEVEL_LOG_MAIN, (errmsg("%s", progress_buf.data)));
 				termPQExpBuffer(&progress_buf);
 
 				last = cur;
@@ -6368,12 +7110,21 @@ errstartImpl(ErrorLevel elevel)
 			 * Print the message only if there's a debugging mode for all types
 			 * of messages.
 			 */
-			start_error_reporting = debug;
+			start_error_reporting = debug_level >= DEBUG_ALL;
+			break;
+		case ELEVEL_LOG_CLIENT_FAIL:
+			/*
+			 * Print a failure message only if there's at least a debugging
+			 * mode for failures ("fails" or "all").
+			 */
+			start_error_reporting = debug_level >= DEBUG_FAILS;
 			break;
-		case ELEVEL_LOG:
+		case ELEVEL_LOG_CLIENT_ABORTED:
+		case ELEVEL_LOG_MAIN:
 		case ELEVEL_FATAL:
 			/*
-			 * Always print the error/log message.
+			 * Always print the message if the client is aborted or if this is
+			 * an error/log message of the main program.
 			 */
 			start_error_reporting = true;
 			break;
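In other words, the switch above implements a simple filter that maps the new error levels onto the -d/--debug levels. A minimal standalone sketch of that mapping (the enum member names follow this hunk, DEBUG_NO is an assumed name for the default debugging level, and the surrounding ereport machinery is omitted):

#include <stdbool.h>

typedef enum DebugLevel
{
	DEBUG_NO,					/* assumed name of the default level */
	DEBUG_FAILS,				/* selected with --debug fails */
	DEBUG_ALL					/* selected with --debug all */
} DebugLevel;

typedef enum ErrorLevel
{
	ELEVEL_DEBUG,
	ELEVEL_LOG_CLIENT_FAIL,
	ELEVEL_LOG_CLIENT_ABORTED,
	ELEVEL_LOG_MAIN,
	ELEVEL_FATAL
} ErrorLevel;

/* Should a message at this error level be printed at this debug level? */
static bool
message_enabled(ErrorLevel elevel, DebugLevel debug_level)
{
	switch (elevel)
	{
		case ELEVEL_DEBUG:
			/* ordinary debugging output: only with --debug all */
			return debug_level >= DEBUG_ALL;
		case ELEVEL_LOG_CLIENT_FAIL:
			/* client failures: with --debug fails or --debug all */
			return debug_level >= DEBUG_FAILS;
		default:
			/* client aborts, main-program messages and fatal errors */
			return true;
	}
}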
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 00fb04f..9a0ea00 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -132,7 +132,8 @@ pgbench(
 		qr{builtin: TPC-B},
 		qr{clients: 2\b},
 		qr{processed: 10/10},
-		qr{mode: simple}
+		qr{mode: simple},
+		qr{maximum number of tries: 1}
 	],
 	[qr{^$}],
 	'pgbench tpcb-like');
@@ -151,7 +152,7 @@ pgbench(
 	'pgbench simple update');
 
 pgbench(
-	'-t 100 -c 7 -M prepared -b se --debug',
+	'-t 100 -c 7 -M prepared -b se --debug all',
 	0,
 	[
 		qr{builtin: select only},
@@ -546,6 +547,11 @@ my @errors = (
 SELECT LEAST(:i, :i, :i, :i, :i, :i, :i, :i, :i, :i, :i);
 }
 	],
+	[   'sql division by zero', 0, [qr{ERROR:  division by zero}],
+		q{-- SQL division by zero
+SELECT 1 / 0;
+}
+	],
 
 	# SHELL
 	[
@@ -718,6 +724,17 @@ SELECT LEAST(:i, :i, :i, :i, :i, :i, :i, :i, :i, :i, :i);
 		[qr{unrecognized time unit}], q{\sleep 1 week}
 	],
 
+	# CONDITIONAL BLOCKS
+	[   'if elif failed conditions', 0,
+		[qr{division by zero}],
+		q{-- failed conditions
+\if 1 / 0
+\elif 1 / 0
+\else
+\endif
+}
+	],
+
 	# MISC
 	[
 		'misc invalid backslash command',         1,
@@ -736,14 +753,33 @@ for my $e (@errors)
 	my $n = '001_pgbench_error_' . $name;
 	$n =~ s/ /_/g;
 	pgbench(
-		'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX -M prepared',
+		'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX -M prepared -d fails',
 		$status,
-		[ $status ? qr{^$} : qr{processed: 0/1} ],
+		($status ?
+		 [ qr{^$} ] :
+		 [ qr{processed: 0/1}, qr{number of errors: 1 \(100.000%\)},
+		   qr{^((?!number of retried)(.|\n))*$} ]),
 		$re,
 		'pgbench script error: ' . $name,
 		{ $n => $script });
 }
 
+# reset client variables in case of failure
+pgbench(
+	'-n -t 2 -d fails', 0,
+	[ qr{processed: 0/2}, qr{number of errors: 2 \(100.000%\)},
+	  qr{^((?!number of retried)(.|\n))*$} ],
+	[ qr{(client 0 got a failure in command 1 \(SQL\) of script 0; ERROR:  syntax error at or near ":"(.|\n)*){2}} ],
+	'pgbench reset client variables in case of failure',
+	{	'001_pgbench_reset_client_variables' => q{
+BEGIN;
+-- select an unassigned variable
+SELECT :unassigned_var;
+\set unassigned_var 1
+END;
+}
+	});
+
 # zipfian cache array overflow
 pgbench(
 	'-t 1', 0,
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index a9e067b..b262d5d 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -59,7 +59,7 @@ my @options = (
 	# name, options, stderr checks
 	[
 		'bad option',
-		'-h home -p 5432 -U calvin -d --bad-option',
+		'-h home -p 5432 -U calvin -d all --bad-option',
 		[ qr{(unrecognized|illegal) option}, qr{--help.*more information} ]
 	],
 	[
@@ -151,6 +151,11 @@ my @options = (
 			qr{error while setting random seed from --random-seed option}
 		]
 	],
+	[
+		'bad maximum number of tries',
+		'--max-tries -10',
+		[ qr{invalid number of maximum tries: "-10"} ]
+	],
 
 	# loging sub-options
 	[
diff --git a/src/bin/pgbench/t/003_serialization_and_deadlock_fails.pl b/src/bin/pgbench/t/003_serialization_and_deadlock_fails.pl
new file mode 100644
index 0000000..5e45cb1
--- /dev/null
+++ b/src/bin/pgbench/t/003_serialization_and_deadlock_fails.pl
@@ -0,0 +1,761 @@
+use strict;
+use warnings;
+
+use Config;
+use PostgresNode;
+use TestLib;
+use Test::More tests => 34;
+
+use constant
+{
+	READ_COMMITTED   => 0,
+	REPEATABLE_READ  => 1,
+	SERIALIZABLE     => 2,
+};
+
+my @isolation_level_shell = (
+	'read\\ committed',
+	'repeatable\\ read',
+	'serializable');
+
+# The keys of advisory locks for testing deadlock failures:
+use constant
+{
+	DEADLOCK_1         => 3,
+	WAIT_PGBENCH_2     => 4,
+	DEADLOCK_2         => 5,
+	TRANSACTION_ENDS_1 => 6,
+	TRANSACTION_ENDS_2 => 7,
+};
+
+# Test concurrent updates of a table row.
+my $node = get_new_node('main');
+$node->init;
+$node->start;
+$node->safe_psql('postgres',
+    'CREATE UNLOGGED TABLE xy (x integer, y integer); '
+  . 'INSERT INTO xy VALUES (1, 2), (2, 3);');
+
+my $script_serialization = $node->basedir . '/pgbench_script_serialization';
+append_to_file($script_serialization,
+		"\\set delta random(-5000, 5000)\n"
+	  . "BEGIN;\n"
+	  . "SELECT pg_sleep(1);\n"
+	  . "UPDATE xy SET y = y + :delta "
+	  . "WHERE x = 1 AND pg_advisory_lock(0) IS NOT NULL;\n"
+	  . "SELECT pg_advisory_unlock_all();\n"
+	  . "END;\n");
+
+my $script_deadlocks1 = $node->basedir . '/pgbench_script_deadlocks1';
+append_to_file($script_deadlocks1,
+		"BEGIN;\n"
+	  . "SELECT pg_advisory_lock(" . DEADLOCK_1 . ");\n"
+	  . "SELECT pg_advisory_lock(" . WAIT_PGBENCH_2 . ");\n"
+	  . "SELECT pg_advisory_lock(" . DEADLOCK_2 . ");\n"
+	  . "END;\n"
+	  . "SELECT pg_advisory_unlock_all();\n"
+	  . "SELECT pg_advisory_lock(" . TRANSACTION_ENDS_1 . ");\n"
+	  . "SELECT pg_advisory_unlock_all();");
+
+my $script_deadlocks2 = $node->basedir . '/pgbench_script_deadlocks2';
+append_to_file($script_deadlocks2,
+		"BEGIN;\n"
+	  . "SELECT pg_advisory_lock(" . DEADLOCK_2 . ");\n"
+	  . "SELECT pg_advisory_lock(" . DEADLOCK_1 . ");\n"
+	  . "END;\n"
+	  . "SELECT pg_advisory_unlock_all();\n"
+	  . "SELECT pg_advisory_lock(" . TRANSACTION_ENDS_2 . ");\n"
+	  . "SELECT pg_advisory_unlock_all();");
+
+sub test_pgbench_serialization_errors
+{
+	my ($max_tries, $latency_limit, $test_name) = @_;
+
+	my $isolation_level = REPEATABLE_READ;
+	my $isolation_level_shell = $isolation_level_shell[$isolation_level];
+
+	local $ENV{PGPORT} = $node->port;
+	local $ENV{PGOPTIONS} =
+		"-c default_transaction_isolation=" . $isolation_level_shell;
+	print "# PGOPTIONS: " . $ENV{PGOPTIONS} . "\n";
+
+	my ($h_psql, $in_psql, $out_psql);
+	my ($h_pgbench, $in_pgbench, $out_pgbench, $err_pgbench);
+
+	# Open a psql session, run a parallel transaction and acquire an advisory
+	# lock:
+	print "# Starting psql\n";
+	$h_psql = IPC::Run::start [ 'psql' ], \$in_psql, \$out_psql;
+
+	$in_psql = "begin;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /BEGIN/;
+
+	$in_psql =
+		"update xy set y = y + 1 "
+	  . "where x = 1 and pg_advisory_lock(0) is not null;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /UPDATE 1/;
+
+	# Build the retry-related options; keep a separating space when both are
+	# given so that the later split /\s+/ yields separate arguments.
+	my $retry_options =
+		($max_tries ? "--max-tries $max_tries" : "")
+	  . ($max_tries && $latency_limit ? " " : "")
+	  . ($latency_limit ? "--latency-limit $latency_limit" : "");
+
+	# Start pgbench:
+	my @command = (
+		qw(pgbench --no-vacuum --transactions 1 --debug fails --file),
+		$script_serialization,
+		split /\s+/, $retry_options);
+	print "# Running: " . join(" ", @command) . "\n";
+	$h_pgbench = IPC::Run::start \@command, \$in_pgbench, \$out_pgbench,
+	  \$err_pgbench;
+
+	# Wait until pgbench also tries to acquire the same advisory lock:
+	do
+	{
+		$in_psql =
+			"select * from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = 0::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /1 row/);
+
+	# In psql, commit the transaction, release advisory locks and end the
+	# session:
+	$in_psql = "end;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /COMMIT/;
+
+	$in_psql = "select pg_advisory_unlock_all();\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_all/;
+
+	$in_psql = "\\q\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() while length $in_psql;
+
+	$h_psql->finish();
+
+	# Get pgbench results
+	$h_pgbench->pump() until length $out_pgbench;
+	$h_pgbench->finish();
+
+	# On Windows, the exit status of the process is returned directly as the
+	# process's exit code, while on Unix, it's returned in the high bits
+	# of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+	# header file). IPC::Run's result function always returns exit code >> 8,
+	# assuming the Unix convention, which will always return 0 on Windows as
+	# long as the process was not terminated by an exception. To work around
+	# that, use $h->full_results on Windows instead.
+	my $result =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h_pgbench->full_results)[0]
+	  : $h_pgbench->result(0);
+
+	# Check pgbench results
+	ok(!$result, "@command exit code 0");
+
+	like($out_pgbench,
+		qr{processed: 0/1},
+		"$test_name: check processed transactions");
+
+	like($out_pgbench,
+		qr{number of errors: 1 \(100\.000%\)},
+		"$test_name: check errors");
+
+	like($out_pgbench,
+		qr{^((?!number of retried)(.|\n))*$},
+		"$test_name: check retried");
+
+	if ($max_tries)
+	{
+		like($out_pgbench,
+			qr{maximum number of tries: $max_tries},
+			"$test_name: check the maximum number of tries");
+	}
+	else
+	{
+		like($out_pgbench,
+			qr{^((?!maximum number of tries)(.|\n))*$},
+			"$test_name: check the maximum number of tries");
+	}
+
+	if ($latency_limit)
+	{
+		like($out_pgbench,
+			qr{number of transactions above the $latency_limit\.0 ms latency limit: 1/1 \(100.000 \%\) \(including errors\)},
+			"$test_name: check transactions above latency limit");
+	}
+	else
+	{
+		like($out_pgbench,
+			qr{^((?!latency limit)(.|\n))*$},
+			"$test_name: check transactions above latency limit");
+	}
+
+	my $pattern =
+		"client 0 got a failure in command 3 \\(SQL\\) of script 0; "
+	  . "ERROR:  could not serialize access due to concurrent update";
+
+	like($err_pgbench,
+		qr{$pattern},
+		"$test_name: check serialization failure");
+}
+
+sub test_pgbench_serialization_failures
+{
+	my $isolation_level = REPEATABLE_READ;
+	my $isolation_level_shell = $isolation_level_shell[$isolation_level];
+
+	local $ENV{PGPORT} = $node->port;
+	local $ENV{PGOPTIONS} =
+		"-c default_transaction_isolation=" . $isolation_level_shell;
+	print "# PGOPTIONS: " . $ENV{PGOPTIONS} . "\n";
+
+	my ($h_psql, $in_psql, $out_psql);
+	my ($h_pgbench, $in_pgbench, $out_pgbench, $err_pgbench);
+
+	# Open a psql session, run a parallel transaction and acquire an advisory
+	# lock:
+	print "# Starting psql\n";
+	$h_psql = IPC::Run::start [ 'psql' ], \$in_psql, \$out_psql;
+
+	$in_psql = "begin;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /BEGIN/;
+
+	$in_psql =
+		"update xy set y = y + 1 "
+	  . "where x = 1 and pg_advisory_lock(0) is not null;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /UPDATE 1/;
+
+	# Start pgbench:
+	my @command = (
+		qw(pgbench --no-vacuum --transactions 1 --debug all --max-tries 2),
+		"--file",
+		$script_serialization);
+	print "# Running: " . join(" ", @command) . "\n";
+	$h_pgbench = IPC::Run::start \@command, \$in_pgbench, \$out_pgbench,
+	  \$err_pgbench;
+
+	# Wait until pgbench also tries to acquire the same advisory lock:
+	do
+	{
+		$in_psql =
+			"select * from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = 0::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /1 row/);
+
+	# In psql, commit the transaction, release advisory locks and end the
+	# session:
+	$in_psql = "end;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /COMMIT/;
+
+	$in_psql = "select pg_advisory_unlock_all();\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_all/;
+
+	$in_psql = "\\q\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() while length $in_psql;
+
+	$h_psql->finish();
+
+	# Get pgbench results
+	$h_pgbench->pump() until length $out_pgbench;
+	$h_pgbench->finish();
+
+	# On Windows, the exit status of the process is returned directly as the
+	# process's exit code, while on Unix, it's returned in the high bits
+	# of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+	# header file). IPC::Run's result function always returns exit code >> 8,
+	# assuming the Unix convention, which will always return 0 on Windows as
+	# long as the process was not terminated by an exception. To work around
+	# that, use $h->full_results on Windows instead.
+	my $result =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h_pgbench->full_results)[0]
+	  : $h_pgbench->result(0);
+
+	# Check pgbench results
+	ok(!$result, "@command exit code 0");
+
+	like($out_pgbench,
+		qr{processed: 1/1},
+		"concurrent update with retrying: check processed transactions");
+
+	like($out_pgbench,
+		qr{^((?!number of errors)(.|\n))*$},
+		"concurrent update with retrying: check errors");
+
+	like($out_pgbench,
+		qr{number of retried: 1 \(100\.000%\)},
+		"concurrent update with retrying: check retried");
+
+	like($out_pgbench,
+		qr{number of retries: 1},
+		"concurrent update with retrying: check retries");
+
+	like($out_pgbench,
+		qr{latency average = \d+\.\d{3} ms\n},
+		"concurrent update with retrying: check latency average");
+
+	my $pattern =
+		"client 0 sending UPDATE xy SET y = y \\+ (-?\\d+) "
+	  . "WHERE x = 1 AND pg_advisory_lock\\(0\\) IS NOT NULL;\n"
+	  . "(client 0 receiving\n)+"
+	  . "client 0 got a failure in command 3 \\(SQL\\) of script 0; "
+	  . "ERROR:  could not serialize access due to concurrent update\n\n"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g2+"
+	  . "client 0 continues a failed transaction in command 4 \\(SQL\\) of script 0; "
+	  . "ERROR:  current transaction is aborted, commands ignored until end of transaction block\n\n"
+	  . "client 0 sending END;\n"
+	  . "\\g2+"
+	  . "client 0 repeats the failed transaction \\(try 2/2\\)\n"
+	  . "client 0 executing \\\\set delta\n"
+	  . "client 0 sending BEGIN;\n"
+	  . "\\g2+"
+	  . "client 0 sending SELECT pg_sleep\\(1\\);\n"
+	  . "\\g2+"
+	  . "client 0 sending UPDATE xy SET y = y \\+ \\g1 "
+	  . "WHERE x = 1 AND pg_advisory_lock\\(0\\) IS NOT NULL;";
+
+	like($err_pgbench,
+		qr{$pattern},
+		"concurrent update with retrying: check the retried transaction");
+}
+
+sub test_pgbench_deadlock_errors
+{
+	my $isolation_level = READ_COMMITTED;
+	my $isolation_level_shell = $isolation_level_shell[$isolation_level];
+
+	local $ENV{PGPORT} = $node->port;
+	local $ENV{PGOPTIONS} =
+		"-c default_transaction_isolation=" . $isolation_level_shell;
+	print "# PGOPTIONS: " . $ENV{PGOPTIONS} . "\n";
+
+	my ($h_psql, $in_psql, $out_psql);
+	my ($h1, $in1, $out1, $err1);
+	my ($h2, $in2, $out2, $err2);
+
+	# Open a psql session and acquire an advisory lock:
+	print "# Starting psql\n";
+	$h_psql = IPC::Run::start [ 'psql' ], \$in_psql, \$out_psql;
+
+	$in_psql =
+		"select pg_advisory_lock(" . WAIT_PGBENCH_2 . ") "
+	  . "as pg_advisory_lock_" . WAIT_PGBENCH_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_lock_@{[ WAIT_PGBENCH_2 ]}/;
+
+	# Run the first pgbench:
+	my @command1 = (
+		qw(pgbench --no-vacuum --transactions 1 --debug fails --file),
+		$script_deadlocks1);
+	print "# Running: " . join(" ", @command1) . "\n";
+	$h1 = IPC::Run::start \@command1, \$in1, \$out1, \$err1;
+
+	# Wait until the first pgbench also tries to acquire the same advisory lock:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . WAIT_PGBENCH_2 . "_zero' "
+		  . "else '" . WAIT_PGBENCH_2 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . WAIT_PGBENCH_2
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ WAIT_PGBENCH_2 ]}_not_zero/);
+
+	# Run the second pgbench:
+	my @command2 = (
+		qw(pgbench --no-vacuum --transactions 1 --debug fails --file),
+		$script_deadlocks2);
+	print "# Running: " . join(" ", @command2) . "\n";
+	$h2 = IPC::Run::start \@command2, \$in2, \$out2, \$err2;
+
+	# Wait until the second pgbench tries to acquire the lock held by the first
+	# pgbench:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . DEADLOCK_1 . "_zero' "
+		  . "else '" . DEADLOCK_1 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . DEADLOCK_1
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ DEADLOCK_1 ]}_not_zero/);
+
+	# In the psql session, release the lock that the first pgbench is waiting
+	# for and end the session:
+	$in_psql =
+		"select pg_advisory_unlock(" . WAIT_PGBENCH_2 . ") "
+	  . "as pg_advisory_unlock_" . WAIT_PGBENCH_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_@{[ WAIT_PGBENCH_2 ]}/;
+
+	$in_psql = "\\q\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() while length $in_psql;
+
+	$h_psql->finish();
+
+	# Get results from all pgbenches:
+	$h1->pump() until length $out1;
+	$h1->finish();
+
+	$h2->pump() until length $out2;
+	$h2->finish();
+
+	# On Windows, the exit status of the process is returned directly as the
+	# process's exit code, while on Unix, it's returned in the high bits
+	# of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+	# header file). IPC::Run's result function always returns exit code >> 8,
+	# assuming the Unix convention, which will always return 0 on Windows as
+	# long as the process was not terminated by an exception. To work around
+	# that, use $h->full_results on Windows instead.
+	my $result1 =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h1->full_results)[0]
+	  : $h1->result(0);
+
+	my $result2 =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h2->full_results)[0]
+	  : $h2->result(0);
+
+	# Check all pgbench results
+	ok(!$result1, "@command1 exit code 0");
+	ok(!$result2, "@command2 exit code 0");
+
+	# The first or second pgbench should get a deadlock error
+	ok((($out1 =~ /processed: 0\/1/ and $out2 =~ /processed: 1\/1/) or
+		($out2 =~ /processed: 0\/1/ and $out1 =~ /processed: 1\/1/)),
+		"concurrent deadlock update: check processed transactions");
+
+	ok((($out1 =~ /number of errors: 1 \(100\.000%\)/ and
+		 $out2 =~ /^((?!number of errors)(.|\n))*$/) or
+		($out2 =~ /number of errors: 1 \(100\.000%\)/ and
+		 $out1 =~ /^((?!number of errors)(.|\n))*$/)),
+		"concurrent deadlock update: check errors");
+
+	ok(($err1 =~ /client 0 got a failure in command 3 \(SQL\) of script 0; ERROR:  deadlock detected/ or
+		$err2 =~ /client 0 got a failure in command 2 \(SQL\) of script 0; ERROR:  deadlock detected/),
+		"concurrent deadlock update: check deadlock failure");
+
+	# Neither pgbench should have retried transactions
+	like($out1 . $out2,
+		qr{^((?!number of retried)(.|\n))*$},
+		"concurrent deadlock update: check retried");
+}
+
+sub test_pgbench_deadlock_failures
+{
+	my $isolation_level = READ_COMMITTED;
+	my $isolation_level_shell = $isolation_level_shell[$isolation_level];
+
+	local $ENV{PGPORT} = $node->port;
+	local $ENV{PGOPTIONS} =
+		"-c default_transaction_isolation=" . $isolation_level_shell;
+	print "# PGOPTIONS: " . $ENV{PGOPTIONS} . "\n";
+
+	my ($h_psql, $in_psql, $out_psql);
+	my ($h1, $in1, $out1, $err1);
+	my ($h2, $in2, $out2, $err2);
+
+	# Open a psql session and acquire an advisory lock:
+	print "# Starting psql\n";
+	$h_psql = IPC::Run::start [ 'psql' ], \$in_psql, \$out_psql;
+
+	$in_psql =
+		"select pg_advisory_lock(" . WAIT_PGBENCH_2 . ") "
+	  . "as pg_advisory_lock_" . WAIT_PGBENCH_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_lock_@{[ WAIT_PGBENCH_2 ]}/;
+
+	# Run the first pgbench:
+	my @command1 = (
+		qw(pgbench --no-vacuum --transactions 1 --debug all --max-tries 2),
+		"--file",
+		$script_deadlocks1);
+	print "# Running: " . join(" ", @command1) . "\n";
+	$h1 = IPC::Run::start \@command1, \$in1, \$out1, \$err1;
+
+	# Wait until the first pgbench also tries to acquire the same advisory lock:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . WAIT_PGBENCH_2 . "_zero' "
+		  . "else '" . WAIT_PGBENCH_2 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . WAIT_PGBENCH_2
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ WAIT_PGBENCH_2 ]}_not_zero/);
+
+	# Run the second pgbench:
+	my @command2 = (
+		qw(pgbench --no-vacuum --transactions 1 --debug all --max-tries 2),
+		"--file",
+		$script_deadlocks2);
+	print "# Running: " . join(" ", @command2) . "\n";
+	$h2 = IPC::Run::start \@command2, \$in2, \$out2, \$err2;
+
+	# Wait until the second pgbench tries to acquire the lock held by the first
+	# pgbench:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . DEADLOCK_1 . "_zero' "
+		  . "else '" . DEADLOCK_1 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . DEADLOCK_1
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ DEADLOCK_1 ]}_not_zero/);
+
+	# In the psql session, acquire the locks that pgbenches will wait for:
+	$in_psql =
+		"select pg_advisory_lock(" . TRANSACTION_ENDS_1 . ") "
+	  . "as pg_advisory_lock_" . TRANSACTION_ENDS_1 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_lock_@{[ TRANSACTION_ENDS_1 ]}/;
+
+	$in_psql =
+		"select pg_advisory_lock(" . TRANSACTION_ENDS_2 . ") "
+	  . "as pg_advisory_lock_" . TRANSACTION_ENDS_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_lock_@{[ TRANSACTION_ENDS_2 ]}/;
+
+	# In the psql session, release the lock that the first pgbench is waiting
+	# for:
+	$in_psql =
+		"select pg_advisory_unlock(" . WAIT_PGBENCH_2 . ") "
+	  . "as pg_advisory_unlock_" . WAIT_PGBENCH_2 . ";\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_@{[ WAIT_PGBENCH_2 ]}/;
+
+	# Wait until both pgbench runs are blocked on the locks held by the psql
+	# session:
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . TRANSACTION_ENDS_1 . "_zero' "
+		  . "else '" . TRANSACTION_ENDS_1 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . TRANSACTION_ENDS_1
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ TRANSACTION_ENDS_1 ]}_not_zero/);
+
+	do
+	{
+		$in_psql =
+			"select case count(*) "
+		  . "when 0 then '" . TRANSACTION_ENDS_2 . "_zero' "
+		  . "else '" . TRANSACTION_ENDS_2 . "_not_zero' end "
+		  . "from pg_locks where "
+		  . "locktype = 'advisory' and "
+		  . "objsubid = 1 and "
+		  . "((classid::bigint << 32) | objid::bigint = "
+		  . TRANSACTION_ENDS_2
+		  . "::bigint) and "
+		  . "not granted;\n";
+		print "# Running in psql: " . join(" ", $in_psql);
+		$h_psql->pump() while length $in_psql;
+	} while ($out_psql !~ /@{[ TRANSACTION_ENDS_2 ]}_not_zero/);
+
+	# In the psql session, release advisory locks and end the session:
+	$in_psql = "select pg_advisory_unlock_all() as pg_advisory_unlock_all;\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() until $out_psql =~ /pg_advisory_unlock_all/;
+
+	$in_psql = "\\q\n";
+	print "# Running in psql: " . join(" ", $in_psql);
+	$h_psql->pump() while length $in_psql;
+
+	$h_psql->finish();
+
+	# Get results from both pgbench runs:
+	$h1->pump() until length $out1;
+	$h1->finish();
+
+	$h2->pump() until length $out2;
+	$h2->finish();
+
+	# On Windows, the exit status of the process is returned directly as the
+	# process's exit code, while on Unix, it's returned in the high bits
+	# of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+	# header file). IPC::Run's result function always returns exit code >> 8,
+	# assuming the Unix convention, which will always return 0 on Windows as
+	# long as the process was not terminated by an exception. To work around
+	# that, use $h->full_result on Windows instead.
+	my $result1 =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h1->full_results)[0]
+	  : $h1->result(0);
+
+	my $result2 =
+	    ($Config{osname} eq "MSWin32")
+	  ? ($h2->full_results)[0]
+	  : $h2->result(0);
+
+	# Check all pgbench results
+	ok(!$result1, "@command1 exit code 0");
+	ok(!$result2, "@command2 exit code 0");
+
+	like($out1,
+		qr{processed: 1/1},
+		"concurrent deadlock update with retrying: pgbench 1: "
+	  . "check processed transactions");
+	like($out2,
+		qr{processed: 1/1},
+		"concurrent deadlock update with retrying: pgbench 2: "
+	  . "check processed transactions");
+
+	# One of the two pgbench runs should have got a deadlock failure that was
+	# successfully retried, so neither run should report any errors:
+	like($out1 . $out2,
+		qr{^((?!number of errors)(.|\n))*$},
+		"concurrent deadlock update with retrying: check errors");
+
+	ok((($out1 =~ /number of retried: 1 \(100\.000%\)/ and
+		 $out2 =~ /^((?!number of retried)(.|\n))*$/) or
+		($out2 =~ /number of retried: 1 \(100\.000%\)/ and
+		 $out1 =~ /^((?!number of retried)(.|\n))*$/)),
+		"concurrent deadlock update with retrying: check retries");
+
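+	# Expected --debug traces of the retried client: group 1 captures a
+	# "client 0 receiving" line and the \g1+ backreferences match the further
+	# "receiving" lines between commands.  The pattern checks that the client
+	# reports the deadlock failure, runs the rest of the script, and then
+	# repeats the failed transaction as try 2/2.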
+	my $pattern1 =
+		"client 0 sending BEGIN;\n"
+	  . "(client 0 receiving\n)+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . WAIT_PGBENCH_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 got a failure in command 3 \\(SQL\\) of script 0; "
+	  . "ERROR:  deadlock detected\n"
+	  . "((?!client 0)(.|\n))*"
+	  . "client 0 sending END;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . TRANSACTION_ENDS_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 repeats the failed transaction \\(try 2/2\\)\n"
+	  . "client 0 sending BEGIN;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . WAIT_PGBENCH_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending END;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . TRANSACTION_ENDS_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+";
+
+	my $pattern2 =
+		"client 0 sending BEGIN;\n"
+	  . "(client 0 receiving\n)+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 got a failure in command 2 \\(SQL\\) of script 0; "
+	  . "ERROR:  deadlock detected\n"
+	  . "((?!client 0)(.|\n))*"
+	  . "client 0 sending END;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . TRANSACTION_ENDS_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 repeats the failed transaction \\(try 2/2\\)\n"
+	  . "client 0 sending BEGIN;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . DEADLOCK_1 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending END;\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_lock\\(" . TRANSACTION_ENDS_2 . "\\);\n"
+	  . "\\g1+"
+	  . "client 0 sending SELECT pg_advisory_unlock_all\\(\\);\n"
+	  . "\\g1+";
+
+	ok(($err1 =~ /$pattern1/ or $err2 =~ /$pattern2/),
+		"concurrent deadlock update with retrying: "
+	  . "check the retried transaction");
+}
+
+test_pgbench_serialization_errors(
+								1,      # --max-tries
+								0,      # --latency-limit (will not be used)
+								"concurrent update");
+test_pgbench_serialization_errors(
+								0,	    # --max-tries (will not be used)
+								900,    # --latency-limit
+								"concurrent update with maximum time of tries");
+
+test_pgbench_serialization_failures();
+
+test_pgbench_deadlock_errors();
+test_pgbench_deadlock_failures();
+
+# done
+$node->stop;
-- 
2.7.4

