This is an automated email from the ASF dual-hosted git repository.

kparzysz pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 3756b716d5 [Relay] Use f-strings for string formatting, NFC (#14838)
3756b716d5 is described below

commit 3756b716d537a158f668794555d5193e29100ad0
Author: Krzysztof Parzyszek <kparz...@quicinc.com>
AuthorDate: Sat May 13 09:20:21 2023 -0500

    [Relay] Use f-strings for string formatting, NFC (#14838)
    
    * [Relay] Use f-strings for string formatting, NFC
    
    Replace uses of % and .format() with f-strings.
    
    Reformat modified files.
    
    * Fix typo in python/tvm/relay/frontend/tensorflow_ops.py
    
    `0[s0_size] -> s0[s0_size]`
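    
    An illustrative sketch of the pattern, taken from build_module.py in the
    diff below (`kind` here is just a local string, e.g. the executor kind):
    
        kind = "debug"  # stand-in for any local value being interpolated
        # before: str.format / percent-style interpolation
        msg = "unknown execution strategy: {0}".format(kind)
        # after: the equivalent f-string evaluates the expression in place
        msg = f"unknown execution strategy: {kind}"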
---
 python/tvm/relay/backend/interpreter.py            |  11 ++-
 python/tvm/relay/backend/te_compiler.py            |   4 +-
 python/tvm/relay/build_module.py                   |   2 +-
 python/tvm/relay/expr.py                           |  69 +++++---------
 python/tvm/relay/expr_functor.py                   |   8 +-
 python/tvm/relay/frontend/tensorflow_ops.py        |   2 +-
 python/tvm/relay/loops.py                          |   2 +-
 python/tvm/relay/prelude.py                        |  76 +++++-----------
 python/tvm/relay/qnn/op/layout_conversions.py      |   4 +-
 python/tvm/relay/qnn/op/qnn.py                     |  81 +++--------------
 python/tvm/relay/quantize/_calibrate.py            |   4 +-
 python/tvm/relay/quantize/quantize.py              |   4 +-
 python/tvm/relay/testing/dcgan.py                  |   4 +-
 python/tvm/relay/testing/densenet.py               |  12 +--
 python/tvm/relay/testing/inception_v3.py           | 100 +++++++++------------
 python/tvm/relay/testing/init.py                   |  11 ++-
 python/tvm/relay/testing/layers.py                 |  12 +--
 python/tvm/relay/testing/lstm.py                   |  20 ++---
 python/tvm/relay/testing/mobilenet.py              |   2 +-
 python/tvm/relay/testing/py_converter.py           |  21 ++---
 python/tvm/relay/testing/resnet.py                 |   8 +-
 python/tvm/relay/testing/resnet_3d.py              |   8 +-
 python/tvm/relay/testing/squeezenet.py             |  17 ++--
 python/tvm/relay/testing/tf.py                     |   8 +-
 python/tvm/relay/testing/tflite.py                 |   2 +-
 python/tvm/relay/testing/vgg.py                    |   8 +-
 .../transform/fake_quantization_to_integer.py      |  30 ++-----
 python/tvm/relay/type_functor.py                   |   2 +-
 28 files changed, 185 insertions(+), 347 deletions(-)

diff --git a/python/tvm/relay/backend/interpreter.py b/python/tvm/relay/backend/interpreter.py
index e4da6f447f..80a8880fbc 100644
--- a/python/tvm/relay/backend/interpreter.py
+++ b/python/tvm/relay/backend/interpreter.py
@@ -99,7 +99,7 @@ class Executor(object):
 
         if kwargs and not isinstance(expr, Function):
             raise Exception(
-                "can only supply keyword parameters for a " "relay.Function, found {0}".format(expr)
+                f"can only supply keyword parameters for a relay.Function, found {expr}"
             )
 
         params = expr.params
@@ -111,17 +111,16 @@ class Executor(object):
             if i < num_of_args:
                 if kwargs.get(name):
                     raise Exception(
-                        "duplicate argument supplied in "
-                        "both positional args (at position: {0}), "
-                        "and keyword argument (with name: {1})".format(i, name)
+                        f"duplicate argument supplied in "
+                        f"both positional args (at position: {i}), "
+                        f"and keyword argument (with name: {name})"
                     )
             else:
                 cargs.append(kwargs[name])
 
         if len(cargs) != len(params):
             raise Exception(
-                "insufficient arguments, expected "
-                "{0}, provided {1}".format(len(cargs), len(params))
+                f"insufficient arguments, expected " f"{len(cargs)}, provided {len(params)}"
             )
 
         return tuple(cargs)
diff --git a/python/tvm/relay/backend/te_compiler.py b/python/tvm/relay/backend/te_compiler.py
index 814e793290..84e4ecbaec 100644
--- a/python/tvm/relay/backend/te_compiler.py
+++ b/python/tvm/relay/backend/te_compiler.py
@@ -111,8 +111,8 @@ def get_valid_implementations(op, attrs, inputs, out_type, target):
     """
     fstrategy = op.get_attr("FTVMStrategy")
     assert fstrategy is not None, (
-        "%s doesn't have an FTVMStrategy registered. You can register "
-        "one in python with `tvm.relay.op.register_strategy`." % op.name
+        f"{op.name} doesn't have an FTVMStrategy registered. You can register "
+        f"one in python with `tvm.relay.op.register_strategy`."
     )
     with target:
         strategy = fstrategy(attrs, inputs, out_type, target)
diff --git a/python/tvm/relay/build_module.py b/python/tvm/relay/build_module.py
index f2feed9fd6..40a91cc75a 100644
--- a/python/tvm/relay/build_module.py
+++ b/python/tvm/relay/build_module.py
@@ -683,4 +683,4 @@ def create_executor(kind="debug", mod=None, device=None, target="llvm", params=N
         return VMExecutor(mod, device, raw_targets)
     if kind == "aot":
         return AotExecutor(mod, device, raw_targets)
-    raise RuntimeError("unknown execution strategy: {0}".format(kind))
+    raise RuntimeError(f"unknown execution strategy: {kind}")
diff --git a/python/tvm/relay/expr.py b/python/tvm/relay/expr.py
index d8bca5c4a4..5239eaa883 100644
--- a/python/tvm/relay/expr.py
+++ b/python/tvm/relay/expr.py
@@ -97,41 +97,41 @@ class ExprWithOp(RelayExpr):
         if isinstance(other, Expr):
             return _op_make.less(self, other)
         elif isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
         else:
-            raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f"type {type(other)} not supported")
 
     def __gt__(self, other):
         if isinstance(other, Expr):
             return _op_make.greater(self, other)
         elif isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
         else:
-            raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f"type {type(other)} not supported")
 
     def __ge__(self, other):
         if isinstance(other, Expr):
             return _op_make.greater_equal(self, other)
         elif isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
         else:
-            raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f"type {type(other)} not supported")
 
     def __le__(self, other):
         if isinstance(other, Expr):
             return _op_make.less_equal(self, other)
         elif isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
         else:
-            raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f"type {type(other)} not supported")
 
     def __add__(self, other):
         if isinstance(other, Expr):
             return _op_make.add(self, other)
         elif isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
         else:
-            raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f"type {type(other)} not supported")
 
     def __radd__(self, other):
         return self.__add__(other)
@@ -140,22 +140,22 @@ class ExprWithOp(RelayExpr):
         if isinstance(other, Expr):
             return _op_make.subtract(self, other)
         elif isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
         else:
-            raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f"type {type(other)} not supported")
 
     def __rsub__(self, other):
         if isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
-        raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
+        raise TypeError(f"type {type(other)} not supported")
 
     def __mul__(self, other):
         if isinstance(other, Expr):
             return _op_make.multiply(self, other)
         elif isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
         else:
-            raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f"type {type(other)} not supported")
 
     def __rmul__(self, other):
         return self.__mul__(other)
@@ -164,14 +164,14 @@ class ExprWithOp(RelayExpr):
         if isinstance(other, Expr):
             return _op_make.divide(self, other)
         elif isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
         else:
-            raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f"type {type(other)} not supported")
 
     def __rdiv__(self, other):
         if isinstance(other, _Number):
-            raise TypeError('convert "%s" with `const` first' % str(other))
-        raise TypeError("type %s not supported" % str(type(other)))
+            raise TypeError(f'convert "{str(other)}" with `const` first')
+        raise TypeError(f"type {type(other)} not supported")
 
     def __truediv__(self, other):
         return self.__div__(other)
@@ -213,12 +213,7 @@ class Constant(ExprWithOp):
 
 
 @tvm._ffi.register_func("relay.ConstantWithFields")
-def ConstantWithFields(
-    constant,
-    data=None,
-    virtual_device=None,
-    span=None,
-):
+def ConstantWithFields(constant, data=None, virtual_device=None, span=None):
     """
     Returns constant with the given properties. A None property denotes 'no change'.
     Returns constant if all properties are unchanged. Otherwise, returns a copy with the new
@@ -467,12 +462,7 @@ class RefCreate(ExprWithOp):
 
 
 @tvm._ffi.register_func("relay.RefCreateWithFields")
-def RefCreateWithFields(
-    ref_create,
-    value=None,
-    virtual_device=None,
-    span=None,
-):
+def RefCreateWithFields(ref_create, value=None, virtual_device=None, span=None):
     """
     Returns ref_create with the given properties. A None property denotes 'no change'.
     Returns ref_create if all properties are unchanged. Otherwise, returns a copy with the new
@@ -498,12 +488,7 @@ class RefRead(ExprWithOp):
 
 
 @tvm._ffi.register_func("relay.RefReadWithFields")
-def RefReadWithFields(
-    ref_read,
-    ref=None,
-    virtual_device=None,
-    span=None,
-):
+def RefReadWithFields(ref_read, ref=None, virtual_device=None, span=None):
     """
     Returns ref_read with the given properties. A None property denotes 'no change'.
     Returns ref_read if all properties are unchanged. Otherwise, returns a copy with the new
@@ -534,13 +519,7 @@ class RefWrite(ExprWithOp):
 
 
 @tvm._ffi.register_func("relay.RefWriteWithFields")
-def RefWriteWithFields(
-    ref_write,
-    ref=None,
-    value=None,
-    virtual_device=None,
-    span=None,
-):
+def RefWriteWithFields(ref_write, ref=None, value=None, virtual_device=None, span=None):
     """
     Returns ref_write with the given properties. A None property denotes 'no change'.
     Returns ref_write if all properties are unchanged. Otherwise, returns a copy with the new
diff --git a/python/tvm/relay/expr_functor.py b/python/tvm/relay/expr_functor.py
index ebea344b41..95a8c79dc2 100644
--- a/python/tvm/relay/expr_functor.py
+++ b/python/tvm/relay/expr_functor.py
@@ -73,7 +73,7 @@ class ExprFunctor:
         elif isinstance(expr, Match):
             res = self.visit_match(expr)
         else:
-            raise Exception("warning unhandled case: {0}".format(type(expr)))
+            raise Exception(f"warning unhandled case: {type(expr)}")
 
         self.memo_map[expr] = res
 
@@ -204,11 +204,7 @@ class ExprMutator(ExprFunctor):
     def visit_function(self, fn):
         new_params = [self.visit(x) for x in fn.params]
         new_body = self.visit(fn.body)
-        return FunctionWithFields(
-            fn,
-            list(new_params),
-            new_body,
-        )
+        return FunctionWithFields(fn, list(new_params), new_body)
 
     def visit_let(self, let):
         new_var = self.visit(let.var)
diff --git a/python/tvm/relay/frontend/tensorflow_ops.py b/python/tvm/relay/frontend/tensorflow_ops.py
index 014d0065fc..e2c3a34252 100644
--- a/python/tvm/relay/frontend/tensorflow_ops.py
+++ b/python/tvm/relay/frontend/tensorflow_ops.py
@@ -1946,7 +1946,7 @@ def _broadcast_args():
             else:
                 assert (
                     s1[s1_size - i] == 1
-                ), f"Incompatible broadcast type {0[s0_size - i]} and {s1[s1_size - i]}"
+                ), f"Incompatible broadcast type {s0[s0_size - i]} and {s1[s1_size - i]}"
                 out.appendleft(s0[s0_size - i])
         if s0_size < s1_size:
             for i in range(s0_size + 1, s1_size + 1):
diff --git a/python/tvm/relay/loops.py b/python/tvm/relay/loops.py
index d46e34860f..61183fd531 100644
--- a/python/tvm/relay/loops.py
+++ b/python/tvm/relay/loops.py
@@ -53,7 +53,7 @@ def while_loop(cond, loop_vars, loop_bodies):
     fresh_vars = []
 
     for i, loop_var in enumerate(loop_vars):
-        name = loop_var.name_hint if isinstance(loop_var, _expr.Var) else "arg{}".format(i)
+        name = loop_var.name_hint if isinstance(loop_var, _expr.Var) else f"arg{i}"
         new_var = _expr.var(name, type_annotation=sb.type_of(loop_var), span=loop_var.span)
         fresh_vars.append(new_var)
 
diff --git a/python/tvm/relay/prelude.py b/python/tvm/relay/prelude.py
index f21e3eaf2c..0db639a3a8 100644
--- a/python/tvm/relay/prelude.py
+++ b/python/tvm/relay/prelude.py
@@ -59,9 +59,9 @@ def get_tensor_array_shape(expr, dtype, prelude):
     checked_type = mod["main"].body.checked_type
     assert isinstance(checked_type, TypeCall), "Input must be a tensor array."
     ta_type_str = checked_type.args[0].func.name_hint
-    static_ta_ty_start = "static_tensor_{}".format(dtype)
+    static_ta_ty_start = f"static_tensor_{dtype}"
     if ta_type_str.startswith(static_ta_ty_start):
-        shape_str = ta_type_str.replace("{}_".format(static_ta_ty_start), "").replace("_t", "")
+        shape_str = ta_type_str.replace(f"{static_ta_ty_start}_", "").replace("_t", "")
         shape = []
         if "scalar" not in shape_str:
             for dim_str in shape_str.split("_"):
@@ -104,18 +104,18 @@ def _get_name_static(canonical, dtype, shape, batch_dim=None, extra_shapes=None)
 
     if extra_shapes is not None:
         for n, s in extra_shapes.items():
-            extra_shape_str = "_{}_{}".format(n, _to_str(s))
+            extra_shape_str = f"_{n}_{_to_str(s)}"
             shape_str += extra_shape_str
 
     if len(shape_str) == 0:
         shape_str = "scalar"
     if canonical == "tensor_t":
-        return "static_tensor_{}_{}_t".format(dtype, shape_str)
+        return f"static_tensor_{dtype}_{shape_str}_t"
     if batch_dim is None or canonical in ["tensor_constructor", "tensor_nil"]:
-        return "{}_{}_{}".format(canonical, dtype, shape_str)
+        return f"{canonical}_{dtype}_{shape_str}"
     if batch_dim != 1:
-        return "{}_{}_{}".format(canonical, dtype, shape_str)
-    return "{}_{}_batch{}_{}".format(canonical, dtype, str(batch_dim), shape_str)
+        return f"{canonical}_{dtype}_{shape_str}"
+    return f"{canonical}_{dtype}_batch{batch_dim}_{shape_str}"
 
 
 def _to_str(shape):
@@ -224,9 +224,7 @@ class StaticTensorArrayOps(object):
 
         origin_tensor_constructor = self.get_ctor("tensor_constructor")
 
-        output_shape = [
-            Any(),
-        ] + list(self.shape[1:])
+        output_shape = [Any()] + list(self.shape[1:])
         tensor_type_var, tensor_constructor, _ = self._get_adt_by_shape(output_shape)
 
         t = Var("tensor", self.tensor_type_var())
@@ -255,9 +253,7 @@ class StaticTensorArrayOps(object):
         if self.is_cached(concat_name):
             return
 
-        output_shape = [
-            Any(),
-        ] + list(self.shape[1:])
+        output_shape = [Any()] + list(self.shape[1:])
         tensor_type_var, tensor_constructor, _ = self._get_adt_by_shape(output_shape)
 
         origin_tensor_constructor = self.get_ctor("tensor_constructor")
@@ -301,10 +297,7 @@ class StaticTensorArrayOps(object):
         # in stack op, we need to recursively concatenate.
         new_axis = Any() if self.batch_dim is None or self.batch_dim != 1 else self.batch_dim
         tensor_type_var, tensor_constructor, _ = self._get_adt_by_shape(
-            [
-                new_axis,
-            ]
-            + list(self.shape)
+            [new_axis] + list(self.shape)
         )
         t = Var("t")
         case = Clause(
@@ -497,9 +490,7 @@ class StaticTensorArrayOps(object):
             tensor_array_split_helper_var = GlobalVar(tensor_array_split_helper_name)
             split_var = GlobalVar(split_name)
 
-        output_shape = [
-            Any(),
-        ] + list(self.shape[1:])
+        output_shape = [Any()] + list(self.shape[1:])
         output_tensor_type_var, _, output_ops = self._get_adt_by_shape(output_shape)
         output_ops.define_tensor_array_write()
         write_var = output_ops.get_global_var("tensor_array_write")
@@ -575,9 +566,7 @@ class StaticTensorArrayOps(object):
 
         concat_var = GlobalVar(concat_name)
 
-        output_shape = [
-            Any(),
-        ] + list(self.shape[1:])
+        output_shape = [Any()] + list(self.shape[1:])
 
         tensor_type_var, _, output_ops = self._get_adt_by_shape(output_shape)
 
@@ -617,9 +606,7 @@ class StaticTensorArrayOps(object):
 
         # Register tensor_concatenate for output_shape
         new_axis = Any() if not self.batch_dim or self.batch_dim != 1 else self.batch_dim
-        output_shape = [
-            new_axis,
-        ] + list(self.shape)
+        output_shape = [new_axis] + list(self.shape)
         _, _, output_ops = self._get_adt_by_shape(output_shape)
         output_ops.define_tensor_concatenate()
         concat_var = output_ops.get_global_var("tensor_concatenate")
@@ -627,9 +614,7 @@ class StaticTensorArrayOps(object):
         tensor_array_expand_dims = self.prelude.map(expand_dims_var, tensor_array)
         if self.batch_dim is not None and self.batch_dim == 1:
             # only one element
-            tensors = self.prelude.id(
-                self.prelude.hd(tensor_array_expand_dims),
-            )
+            tensors = self.prelude.id(self.prelude.hd(tensor_array_expand_dims))
         else:
             tensors = self.prelude.foldl(
                 concat_var,
@@ -650,9 +635,7 @@ class StaticTensorArrayOps(object):
         helper_var = self._create_global_var(helper_name)
 
         new_axis = Any() if self.batch_dim is None or self.batch_dim != 1 else self.batch_dim
-        output_shape = [
-            new_axis,
-        ] + list(self.shape)
+        output_shape = [new_axis] + list(self.shape)
         output_tensor_type_var, _, _ = self._get_adt_by_shape(output_shape)
         stack_var = self.get_global_var("tensor_array_stack")
         read_var = self.get_global_var("tensor_array_read")
@@ -1130,10 +1113,7 @@ class TensorArrayOps(object):
         shape = op.shape_of(tensor2)
         ndim = op.take(shape, const(0))
         self.prelude.mod[tensor_array_unstack_tensor2_var] = Function(
-            [tensor2],
-            helper_var(const(0), ndim, tensor2),
-            self.list(self.tensor_type_var()),
-            [],
+            [tensor2], helper_var(const(0), ndim, tensor2), self.list(self.tensor_type_var()), []
         )
 
     def define_tensor_array_unstack_tensor3(self):
@@ -1167,10 +1147,7 @@ class TensorArrayOps(object):
         shape = op.shape_of(tensor3)
         ndim = op.take(shape, const(0))
         self.prelude.mod[tensor_array_unstack_tensor3_var] = Function(
-            [tensor3],
-            helper_var(const(0), ndim, tensor3),
-            self.list(self.tensor_type_var()),
-            [],
+            [tensor3], helper_var(const(0), ndim, tensor3), self.list(self.tensor_type_var()), []
         )
 
     def define_tensor_array_unstack_tensor4(self):
@@ -1204,10 +1181,7 @@ class TensorArrayOps(object):
         shape = op.shape_of(tensor4)
         ndim = op.take(shape, const(0))
         self.prelude.mod[tensor_array_unstack_tensor4_var] = Function(
-            [tensor4],
-            helper_var(const(0), ndim, tensor4),
-            self.list(self.tensor_type_var()),
-            [],
+            [tensor4], helper_var(const(0), ndim, tensor4), self.list(self.tensor_type_var()), []
         )
 
     def define_tensor_array_unstack_tensor5(self):
@@ -1241,10 +1215,7 @@ class TensorArrayOps(object):
         shape = op.shape_of(tensor5)
         ndim = op.take(shape, const(0))
         self.prelude.mod[tensor_array_unstack_tensor5_var] = Function(
-            [tensor5],
-            helper_var(const(0), ndim, tensor5),
-            self.list(self.tensor_type_var()),
-            [],
+            [tensor5], helper_var(const(0), ndim, tensor5), self.list(self.tensor_type_var()), []
         )
 
     def define_tensor_array_unstack_tensor6(self):
@@ -1278,10 +1249,7 @@ class TensorArrayOps(object):
         shape = op.shape_of(tensor6)
         ndim = op.take(shape, const(0))
         self.prelude.mod[tensor_array_unstack_tensor6_var] = Function(
-            [tensor6],
-            helper_var(const(0), ndim, tensor6),
-            self.list(self.tensor_type_var()),
-            [],
+            [tensor6], helper_var(const(0), ndim, tensor6), self.list(self.tensor_type_var()), []
         )
 
     def define_tensor_array_scatter(self):
@@ -1507,8 +1475,8 @@ class Prelude:
     def get_name(self, canonical, dtype):
         """Get name corresponding to the canonical name"""
         if canonical == "tensor_t":
-            return "tensor_{}_t".format(dtype)
-        return "{}_{}".format(canonical, dtype)
+            return f"tensor_{dtype}_t"
+        return f"{canonical}_{dtype}"
 
     def get_global_var(self, canonical, dtype):
         """Get global var corresponding to the canonical name"""
diff --git a/python/tvm/relay/qnn/op/layout_conversions.py b/python/tvm/relay/qnn/op/layout_conversions.py
index 24c787e0a0..668cafb8ae 100644
--- a/python/tvm/relay/qnn/op/layout_conversions.py
+++ b/python/tvm/relay/qnn/op/layout_conversions.py
@@ -77,7 +77,7 @@ def convert_qnn_conv2d(attrs, inputs, tinfos, desired_layouts):
             new_attrs["kernel_layout"] = "HWIO"
         return relay.qnn.op.conv2d(*inputs, **new_attrs)
 
-    raise ValueError("Layout %s is not yet supported" % desired_data_layout)
+    raise ValueError(f"Layout {desired_data_layout} is not yet supported")
 
 
 @reg.register_convert_op_layout("qnn.conv2d_transpose")
@@ -125,4 +125,4 @@ def convert_qnn_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
         new_attrs["kernel_layout"] = "HWIO"
         return relay.qnn.op.conv2d_transpose(*inputs, **new_attrs)
 
-    raise ValueError("Layout %s is not yet supported" % desired_data_layout)
+    raise ValueError(f"Layout {desired_data_layout} is not yet supported")
diff --git a/python/tvm/relay/qnn/op/qnn.py b/python/tvm/relay/qnn/op/qnn.py
index 504688759e..e2c251ec78 100644
--- a/python/tvm/relay/qnn/op/qnn.py
+++ b/python/tvm/relay/qnn/op/qnn.py
@@ -86,7 +86,7 @@ class RequantizeConfig(Object):
 
     def __setattr__(self, name, value):
         if name in RequantizeConfig._node_defaults:
-            raise AttributeError("'%s' object cannot set attribute '%s'" % (str(type(self)), name))
+            raise AttributeError(f"'{type(self)}' object cannot set attribute '{name}'")
         return super(RequantizeConfig, self).__setattr__(name, value)
 
 
@@ -876,13 +876,7 @@ def tanh(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.tanh(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.tanh(x, scale, zero_point, output_scale, output_zero_point)
 
 
 def exp(x, scale, zero_point, output_scale, output_zero_point):
@@ -911,13 +905,7 @@ def exp(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.exp(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.exp(x, scale, zero_point, output_scale, output_zero_point)
 
 
 def sqrt(x, scale, zero_point, output_scale, output_zero_point):
@@ -946,13 +934,7 @@ def sqrt(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.sqrt(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.sqrt(x, scale, zero_point, output_scale, output_zero_point)
 
 
 def rsqrt(x, scale, zero_point, output_scale, output_zero_point):
@@ -981,13 +963,7 @@ def rsqrt(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.rsqrt(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.rsqrt(x, scale, zero_point, output_scale, output_zero_point)
 
 
 def erf(x, scale, zero_point, output_scale, output_zero_point):
@@ -1016,13 +992,7 @@ def erf(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.erf(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.erf(x, scale, zero_point, output_scale, output_zero_point)
 
 
 # pylint: disable=redefined-builtin
@@ -1054,13 +1024,7 @@ def abs(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.abs(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.abs(x, scale, zero_point, output_scale, output_zero_point)
 
 
 def sigmoid(x, scale, zero_point, output_scale, output_zero_point):
@@ -1089,13 +1053,7 @@ def sigmoid(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.sigmoid(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.sigmoid(x, scale, zero_point, output_scale, output_zero_point)
 
 
 def hardswish(x, scale, zero_point, output_scale, output_zero_point):
@@ -1124,13 +1082,7 @@ def hardswish(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.hardswish(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.hardswish(x, scale, zero_point, output_scale, output_zero_point)
 
 
 def log(x, scale, zero_point, output_scale, output_zero_point):
@@ -1159,13 +1111,7 @@ def log(x, scale, zero_point, output_scale, output_zero_point):
         The computed result.
 
     """
-    return _make.log(
-        x,
-        scale,
-        zero_point,
-        output_scale,
-        output_zero_point,
-    )
+    return _make.log(x, scale, zero_point, output_scale, output_zero_point)
 
 
 def subtract(
@@ -1297,10 +1243,5 @@ def leaky_relu(x, alpha, input_scale, input_zero_point, output_scale, output_zer
         The computed result.
     """
     return _make.leaky_relu(
-        x,
-        alpha,
-        input_scale,
-        input_zero_point,
-        output_scale,
-        output_zero_point,
+        x, alpha, input_scale, input_zero_point, output_scale, output_zero_point
     )
diff --git a/python/tvm/relay/quantize/_calibrate.py b/python/tvm/relay/quantize/_calibrate.py
index 4b2d55ebe8..f03d556814 100644
--- a/python/tvm/relay/quantize/_calibrate.py
+++ b/python/tvm/relay/quantize/_calibrate.py
@@ -224,14 +224,14 @@ def calibrate(dataset=None):
         elif cfg.calibrate_mode == "percentile":
             input_scale_func = _percentile_scale(mod, dataset)
         else:
-            raise ValueError("Unknown calibrate mode {}".format(cfg.calibrate_mode))
+            raise ValueError(f"Unknown calibrate mode {cfg.calibrate_mode}")
 
         if cfg.weight_scale == "max":
             weight_scale_func = _max_scale
         elif cfg.weight_scale == "power2":
             weight_scale_func = _power2_scale
         else:
-            raise ValueError("Unknown weight scale mode {}".format(cfg.weight_scale))
+            raise ValueError(f"Unknown weight scale mode {cfg.weight_scale}")
 
         return _set_params(mod, input_scale_func, weight_scale_func)
 
diff --git a/python/tvm/relay/quantize/quantize.py b/python/tvm/relay/quantize/quantize.py
index 7f4724db22..41343061da 100644
--- a/python/tvm/relay/quantize/quantize.py
+++ b/python/tvm/relay/quantize/quantize.py
@@ -128,7 +128,7 @@ class QConfig(Object):
 
     def __setattr__(self, name, value):
         if name in QConfig._node_defaults:
-            raise AttributeError("'%s' object cannot set attribute '%s'" % (str(type(self)), name))
+            raise AttributeError(f"'{type(self)}' object cannot set attribute '{name}'")
         return super(QConfig, self).__setattr__(name, value)
 
 
@@ -304,7 +304,7 @@ def _bind_params(func, params):
             continue
         arg = name_dict[k]
         if arg is None:
-            raise ValueError("Multiple args in the function have name %s" % k)
+            raise ValueError(f"Multiple args in the function have name {k}")
         bind_dict[arg] = _expr.const(v)
     return _expr.bind(func, bind_dict)
 
diff --git a/python/tvm/relay/testing/dcgan.py b/python/tvm/relay/testing/dcgan.py
index acc478330d..4749d76dbc 100644
--- a/python/tvm/relay/testing/dcgan.py
+++ b/python/tvm/relay/testing/dcgan.py
@@ -65,10 +65,10 @@ def deconv2d(data, ishape, oshape, kshape, layout, name, stride=(2, 2)):
 def deconv2d_bn_relu(data, prefix, **kwargs):
     """a block of deconv + batch norm + relu"""
     eps = 1e-5 + 1e-12
-    net = deconv2d(data, name="%s_deconv" % prefix, **kwargs)
+    net = deconv2d(data, name=f"{prefix}_deconv", **kwargs)
     bn_axis = kwargs.get("layout", "NCHW").index("C")
     net = layers.batch_norm_infer(
-        net, epsilon=eps, scale=False, axis=bn_axis, name="%s_batch_norm" % prefix
+        net, epsilon=eps, scale=False, axis=bn_axis, name=f"{prefix}_batch_norm"
     )
     net = relay.nn.relu(net)
     return net
diff --git a/python/tvm/relay/testing/densenet.py b/python/tvm/relay/testing/densenet.py
index 6b8d0098a5..c9deb78683 100644
--- a/python/tvm/relay/testing/densenet.py
+++ b/python/tvm/relay/testing/densenet.py
@@ -28,15 +28,15 @@ from .init import create_workload
 
 def _make_dense_layer(data, growth_rate, bn_size, index):
     """Single densenet layer."""
-    bn1 = layers.batch_norm_infer(data, name="batch_1_%s" % index)
+    bn1 = layers.batch_norm_infer(data, name=f"batch_1_{index}")
     relu1 = relay.nn.relu(bn1)
     conv1 = layers.conv2d(
-        relu1, channels=bn_size * growth_rate, kernel_size=(1, 1), name="conv2d_1_%s" % index
+        relu1, channels=bn_size * growth_rate, kernel_size=(1, 1), name=f"conv2d_1_{index}"
     )
     bn2 = layers.batch_norm_infer(conv1, name="batch_2_" + index)
     relu2 = relay.nn.relu(bn2)
     conv2 = layers.conv2d(
-        relu2, channels=growth_rate, kernel_size=(3, 3), padding=(1, 1), name="conv2d_2_%s" % index
+        relu2, channels=growth_rate, kernel_size=(3, 3), padding=(1, 1), name=f"conv2d_2_{index}"
     )
     return conv2
 
@@ -46,7 +46,7 @@ def _make_dense_block(data, num_layers, bn_size, growth_rate, index):
     layer_out = data
     blocks = []
     for i in range(num_layers):
-        layer_out = _make_dense_layer(layer_out, growth_rate, bn_size, "%s_%s" % (index, i))
+        layer_out = _make_dense_layer(layer_out, growth_rate, bn_size, f"{index}_{i}")
         blocks.append(layer_out)
     block_out = relay.concatenate(blocks, 1)
     return block_out
@@ -54,10 +54,10 @@ def _make_dense_block(data, num_layers, bn_size, growth_rate, index):
 
 def _make_transition(data, num_output_features, index):
     """Transition between layers."""
-    bn = layers.batch_norm_infer(data, name="batch_t_%s" % index)
+    bn = layers.batch_norm_infer(data, name=f"batch_t_{index}")
     relu = relay.nn.relu(bn)
     conv = layers.conv2d(
-        relu, channels=num_output_features, kernel_size=(1, 1), name="conv_t_%s" % index
+        relu, channels=num_output_features, kernel_size=(1, 1), name=f"conv_t_{index}"
     )
     return relay.nn.avg_pool2d(conv, pool_size=(2, 2), strides=(2, 2))
 
diff --git a/python/tvm/relay/testing/inception_v3.py b/python/tvm/relay/testing/inception_v3.py
index 2381551f66..e5b89ccdec 100644
--- a/python/tvm/relay/testing/inception_v3.py
+++ b/python/tvm/relay/testing/inception_v3.py
@@ -37,12 +37,10 @@ def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None,
         kernel_size=kernel,
         strides=stride,
         padding=pad,
-        name="%s%s_conv1" % (name, suffix),
+        name=f"{name}{suffix}_conv1",
     )
 
-    bn = layers.batch_norm_infer(
-        data=conv, epsilon=2e-5, scale=False, name="%s%s_bn" % (name, suffix)
-    )
+    bn = layers.batch_norm_infer(data=conv, epsilon=2e-5, scale=False, name=f"{name}{suffix}_bn")
     act = relay.nn.relu(data=bn)
     return act
 
@@ -60,27 +58,17 @@ def Pooling(data, kernel, stride, pad, pool_type, name):
 def Inception7A(
     data, num_1x1, num_3x3_red, num_3x3_1, num_3x3_2, num_5x5_red, num_5x5, pool, proj, name
 ):
-    tower_1x1 = Conv(data, num_1x1, name=("%s_conv" % name))
-    tower_5x5 = Conv(data, num_5x5_red, name=("%s_tower" % name), suffix="_conv")
+    tower_1x1 = Conv(data, num_1x1, name=f"{name}_conv")
+    tower_5x5 = Conv(data, num_5x5_red, name=f"{name}_tower", suffix="_conv")
     tower_5x5 = Conv(
-        tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=("%s_tower" % name), suffix="_conv_1"
+        tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=f"{name}_tower", suffix="_conv_1"
     )
-    tower_3x3 = Conv(data, num_3x3_red, name=("%s_tower_1" % name), suffix="_conv")
+    tower_3x3 = Conv(data, num_3x3_red, name=f"{name}_tower_1", suffix="_conv")
     tower_3x3 = Conv(
-        tower_3x3,
-        num_3x3_1,
-        kernel=(3, 3),
-        pad=(1, 1),
-        name=("%s_tower_1" % name),
-        suffix="_conv_1",
+        tower_3x3, num_3x3_1, kernel=(3, 3), pad=(1, 1), name=f"{name}_tower_1", suffix="_conv_1"
     )
     tower_3x3 = Conv(
-        tower_3x3,
-        num_3x3_2,
-        kernel=(3, 3),
-        pad=(1, 1),
-        name=("%s_tower_1" % name),
-        suffix="_conv_2",
+        tower_3x3, num_3x3_2, kernel=(3, 3), pad=(1, 1), name=f"{name}_tower_1", suffix="_conv_2"
     )
     pooling = Pooling(
         data=data,
@@ -88,27 +76,25 @@ def Inception7A(
         stride=(1, 1),
         pad=(1, 1),
         pool_type=pool,
-        name=("%s_pool_%s_pool" % (pool, name)),
+        name=f"{pool}_pool_{name}_pool",
     )
 
-    cproj = Conv(pooling, proj, name=("%s_tower_2" % name), suffix="_conv")
+    cproj = Conv(pooling, proj, name=f"{name}_tower_2", suffix="_conv")
     concat = relay.concatenate((tower_1x1, tower_5x5, tower_3x3, cproj), axis=1)
     return concat
 
 
 # First Downsample
 def Inception7B(data, num_3x3, num_d3x3_red, num_d3x3_1, num_d3x3_2, pool, name):
-    tower_3x3 = Conv(
-        data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=("%s_conv" % name)
-    )
-    tower_d3x3 = Conv(data, num_d3x3_red, name=("%s_tower" % name), suffix="_conv")
+    tower_3x3 = Conv(data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=f"{name}_conv")
+    tower_d3x3 = Conv(data, num_d3x3_red, name=f"{name}_tower", suffix="_conv")
     tower_d3x3 = Conv(
         tower_d3x3,
         num_d3x3_1,
         kernel=(3, 3),
         pad=(1, 1),
         stride=(1, 1),
-        name=("%s_tower" % name),
+        name=f"{name}_tower",
         suffix="_conv_1",
     )
     tower_d3x3 = Conv(
@@ -117,7 +103,7 @@ def Inception7B(data, num_3x3, num_d3x3_red, num_d3x3_1, num_d3x3_2, pool, name)
         kernel=(3, 3),
         pad=(0, 0),
         stride=(2, 2),
-        name=("%s_tower" % name),
+        name=f"{name}_tower",
         suffix="_conv_2",
     )
     pooling = Pooling(
@@ -126,7 +112,7 @@ def Inception7B(data, num_3x3, num_d3x3_red, num_d3x3_1, num_d3x3_2, pool, name)
         stride=(2, 2),
         pad=(0, 0),
         pool_type="max",
-        name=("max_pool_%s_pool" % name),
+        name=f"max_pool_{name}_pool",
     )
     concat = relay.concatenate((tower_3x3, tower_d3x3, pooling), axis=1)
     return concat
@@ -147,14 +133,14 @@ def Inception7C(
     proj,
     name,
 ):
-    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=("%s_conv" % name))
-    tower_d7 = Conv(data=data, num_filter=num_d7_red, name=("%s_tower" % name), suffix="_conv")
+    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=f"{name}_conv")
+    tower_d7 = Conv(data=data, num_filter=num_d7_red, name=f"{name}_tower", suffix="_conv")
     tower_d7 = Conv(
         data=tower_d7,
         num_filter=num_d7_1,
         kernel=(1, 7),
         pad=(0, 3),
-        name=("%s_tower" % name),
+        name=f"{name}_tower",
         suffix="_conv_1",
     )
     tower_d7 = Conv(
@@ -162,16 +148,16 @@ def Inception7C(
         num_filter=num_d7_2,
         kernel=(7, 1),
         pad=(3, 0),
-        name=("%s_tower" % name),
+        name=f"{name}_tower",
         suffix="_conv_2",
     )
-    tower_q7 = Conv(data=data, num_filter=num_q7_red, name=("%s_tower_1" % name), suffix="_conv")
+    tower_q7 = Conv(data=data, num_filter=num_q7_red, name=f"{name}_tower_1", suffix="_conv")
     tower_q7 = Conv(
         data=tower_q7,
         num_filter=num_q7_1,
         kernel=(7, 1),
         pad=(3, 0),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_conv_1",
     )
     tower_q7 = Conv(
@@ -179,7 +165,7 @@ def Inception7C(
         num_filter=num_q7_2,
         kernel=(1, 7),
         pad=(0, 3),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_conv_2",
     )
     tower_q7 = Conv(
@@ -187,7 +173,7 @@ def Inception7C(
         num_filter=num_q7_3,
         kernel=(7, 1),
         pad=(3, 0),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_conv_3",
     )
     tower_q7 = Conv(
@@ -195,7 +181,7 @@ def Inception7C(
         num_filter=num_q7_4,
         kernel=(1, 7),
         pad=(0, 3),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_conv_4",
     )
     pooling = Pooling(
@@ -204,10 +190,10 @@ def Inception7C(
         stride=(1, 1),
         pad=(1, 1),
         pool_type=pool,
-        name=("%s_pool_%s_pool" % (pool, name)),
+        name=f"{pool}_pool_{name}_pool",
     )
     cproj = Conv(
-        data=pooling, num_filter=proj, kernel=(1, 1), name=("%s_tower_2" % name), suffix="_conv"
+        data=pooling, num_filter=proj, kernel=(1, 1), name=f"{name}_tower_2", suffix="_conv"
     )
     # concat
     concat = relay.concatenate((tower_1x1, tower_d7, tower_q7, cproj), axis=1)
@@ -217,25 +203,25 @@ def Inception7C(
 def Inception7D(
     data, num_3x3_red, num_3x3, num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3, pool, name
 ):
-    tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=("%s_tower" % name), suffix="_conv")
+    tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=f"{name}_tower", suffix="_conv")
     tower_3x3 = Conv(
         data=tower_3x3,
         num_filter=num_3x3,
         kernel=(3, 3),
         pad=(0, 0),
         stride=(2, 2),
-        name=("%s_tower" % name),
+        name=f"{name}_tower",
         suffix="_conv_1",
     )
     tower_d7_3x3 = Conv(
-        data=data, num_filter=num_d7_3x3_red, name=("%s_tower_1" % name), suffix="_conv"
+        data=data, num_filter=num_d7_3x3_red, name=f"{name}_tower_1", suffix="_conv"
     )
     tower_d7_3x3 = Conv(
         data=tower_d7_3x3,
         num_filter=num_d7_1,
         kernel=(1, 7),
         pad=(0, 3),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_conv_1",
     )
     tower_d7_3x3 = Conv(
@@ -243,7 +229,7 @@ def Inception7D(
         num_filter=num_d7_2,
         kernel=(7, 1),
         pad=(3, 0),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_conv_2",
     )
     tower_d7_3x3 = Conv(
@@ -251,7 +237,7 @@ def Inception7D(
         num_filter=num_d7_3x3,
         kernel=(3, 3),
         stride=(2, 2),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_conv_3",
     )
     pooling = Pooling(
@@ -260,7 +246,7 @@ def Inception7D(
         stride=(2, 2),
         pool_type=pool,
         pad=(0, 0),
-        name=("%s_pool_%s_pool" % (pool, name)),
+        name=f"{pool}_pool_{name}_pool",
     )
     # concat
     concat = relay.concatenate((tower_3x3, tower_d7_3x3, pooling), axis=1)
@@ -281,14 +267,14 @@ def Inception7E(
     proj,
     name,
 ):
-    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=("%s_conv" % name))
-    tower_d3 = Conv(data=data, num_filter=num_d3_red, name=("%s_tower" % name), suffix="_conv")
+    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=f"{name}_conv")
+    tower_d3 = Conv(data=data, num_filter=num_d3_red, name=f"{name}_tower", suffix="_conv")
     tower_d3_a = Conv(
         data=tower_d3,
         num_filter=num_d3_1,
         kernel=(1, 3),
         pad=(0, 1),
-        name=("%s_tower" % name),
+        name=f"{name}_tower",
         suffix="_mixed_conv",
     )
     tower_d3_b = Conv(
@@ -296,18 +282,18 @@ def Inception7E(
         num_filter=num_d3_2,
         kernel=(3, 1),
         pad=(1, 0),
-        name=("%s_tower" % name),
+        name=f"{name}_tower",
         suffix="_mixed_conv_1",
     )
     tower_3x3_d3 = Conv(
-        data=data, num_filter=num_3x3_d3_red, name=("%s_tower_1" % name), suffix="_conv"
+        data=data, num_filter=num_3x3_d3_red, name=f"{name}_tower_1", suffix="_conv"
     )
     tower_3x3_d3 = Conv(
         data=tower_3x3_d3,
         num_filter=num_3x3,
         kernel=(3, 3),
         pad=(1, 1),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_conv_1",
     )
     tower_3x3_d3_a = Conv(
@@ -315,7 +301,7 @@ def Inception7E(
         num_filter=num_3x3_d3_1,
         kernel=(1, 3),
         pad=(0, 1),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_mixed_conv",
     )
     tower_3x3_d3_b = Conv(
@@ -323,7 +309,7 @@ def Inception7E(
         num_filter=num_3x3_d3_2,
         kernel=(3, 1),
         pad=(1, 0),
-        name=("%s_tower_1" % name),
+        name=f"{name}_tower_1",
         suffix="_mixed_conv_1",
     )
     pooling = Pooling(
@@ -332,10 +318,10 @@ def Inception7E(
         stride=(1, 1),
         pad=(1, 1),
         pool_type=pool,
-        name=("%s_pool_%s_pool" % (pool, name)),
+        name=f"{pool}_pool_{name}_pool",
     )
     cproj = Conv(
-        data=pooling, num_filter=proj, kernel=(1, 1), name=("%s_tower_2" % name), suffix="_conv"
+        data=pooling, num_filter=proj, kernel=(1, 1), name=f"{name}_tower_2", suffix="_conv"
     )
     # concat
     concat = relay.concatenate(
diff --git a/python/tvm/relay/testing/init.py b/python/tvm/relay/testing/init.py
index f275712c77..373b5a8ec3 100644
--- a/python/tvm/relay/testing/init.py
+++ b/python/tvm/relay/testing/init.py
@@ -75,10 +75,10 @@ class Initializer(object):
 
     def _init_default(self, name, _):
         raise ValueError(
-            "Unknown initialization pattern for %s. "
-            "Default initialization is now limited to "
-            '"weight", "bias", "gamma" (1.0), and "beta" (0.0).'
-            "Please use mx.sym.Variable(init=mx.init.*) to set initialization pattern" % name
+            f"Unknown initialization pattern for {name}. "
+            f"Default initialization is now limited to "
+            f'"weight", "bias", "gamma" (1.0), and "beta" (0.0).'
+            f"Please use mx.sym.Variable(init=mx.init.*) to set initialization pattern"
         )
 
 
@@ -110,8 +110,7 @@ class Xavier(Initializer):
         hw_scale = 1.0
         if len(shape) < 2:
             raise ValueError(
-                "Xavier initializer cannot be applied to vector {0}. It requires at"
-                " least 2D.".format(name)
+                f"Xavier initializer cannot be applied to vector {name}. It requires at least 2D."
             )
         if len(shape) > 2:
             hw_scale = np.prod(shape[2:])
diff --git a/python/tvm/relay/testing/layers.py b/python/tvm/relay/testing/layers.py
index 48003f2ae2..8496c56400 100644
--- a/python/tvm/relay/testing/layers.py
+++ b/python/tvm/relay/testing/layers.py
@@ -189,14 +189,8 @@ def conv_kernel_layout(data_layout, is_depthwise=False):
     result : str
         The corresponding kernel layout.
     """
-    conv_layout_map = {
-        "NCHW": "OIHW",
-        "NHWC": "HWIO",
-    }
-    depthwise_conv_layout_map = {
-        "NCHW": "OIHW",
-        "NHWC": "HWOI",
-    }
+    conv_layout_map = {"NCHW": "OIHW", "NHWC": "HWIO"}
+    depthwise_conv_layout_map = {"NCHW": "OIHW", "NHWC": "HWOI"}
     mapping = depthwise_conv_layout_map if is_depthwise else conv_layout_map
-    assert data_layout in mapping, "Unknown data layout %s" % data_layout
+    assert data_layout in mapping, f"Unknown data layout {data_layout}"
     return mapping[data_layout]
diff --git a/python/tvm/relay/testing/lstm.py b/python/tvm/relay/testing/lstm.py
index 8a97c18a1f..bf054592b0 100644
--- a/python/tvm/relay/testing/lstm.py
+++ b/python/tvm/relay/testing/lstm.py
@@ -69,7 +69,7 @@ def lstm_cell(num_hidden, batch_size=1, dtype="float32", name=""):
     i2h = builder.let(
         ("i2h", dense_type),
         layers.dense_add_bias(
-            data=inputs, units=num_hidden * 4, weight=i2h_weight, bias=i2h_bias, name="%si2h" % name
+            data=inputs, units=num_hidden * 4, weight=i2h_weight, bias=i2h_bias, name=f"{name}i2h"
         ),
     )
     h2h = builder.let(
@@ -79,7 +79,7 @@ def lstm_cell(num_hidden, batch_size=1, dtype="float32", name=""):
             units=num_hidden * 4,
             weight=h2h_weight,
             bias=h2h_bias,
-            name="%sh2h" % name,
+            name=f"{name}h2h",
         ),
     )
 
@@ -138,19 +138,19 @@ def get_net(iterations, num_hidden, batch_size=1, dtype="float32"):
 
     for i in range(iterations):
         inputs = relay.Var("data", input_type)
-        i2h_weight = relay.Var("i2h_%s_weight" % i, weight_type)
-        i2h_bias = relay.Var("i2h_%i_bias" % i, bias_type)
-        h2h_weight = relay.Var("h2h_%s_weight" % i, weight_type)
-        h2h_bias = relay.Var("h2h_%s_bias" % i, bias_type)
+        i2h_weight = relay.Var(f"i2h_{i}_weight", weight_type)
+        i2h_bias = relay.Var(f"i2h_{i}_bias", bias_type)
+        h2h_weight = relay.Var(f"h2h_{i}_weight", weight_type)
+        h2h_bias = relay.Var(f"h2h_{i}_bias", bias_type)
 
-        cell_fn = lstm_cell(num_hidden, batch_size, dtype, "lstm_%s" % i)
+        cell_fn = lstm_cell(num_hidden, batch_size, dtype, f"lstm_{i}")
 
         call = builder.let(
-            ("call_%s" % i, cell_type),
+            (f"call_{i}", cell_type),
             relay.Call(cell_fn, [inputs, states, i2h_weight, i2h_bias, h2h_weight, h2h_bias]),
         )
-        new_out = builder.let(("out_%s" % i, input_type), relay.TupleGetItem(call, 0))
-        new_states = builder.let(("states_%s" % i, state_type), relay.TupleGetItem(call, 1))
+        new_out = builder.let((f"out_{i}", input_type), relay.TupleGetItem(call, 0))
+        new_states = builder.let((f"states_{i}", state_type), relay.TupleGetItem(call, 1))
         states = new_states
         out = new_out
 
diff --git a/python/tvm/relay/testing/mobilenet.py b/python/tvm/relay/testing/mobilenet.py
index 0b5593eedc..4c600966d2 100644
--- a/python/tvm/relay/testing/mobilenet.py
+++ b/python/tvm/relay/testing/mobilenet.py
@@ -188,7 +188,7 @@ def mobile_net(
         for i in range(7, 12):
             body = separable_conv_block(
                 body,
-                "separable_conv_block_%d" % i,
+                f"separable_conv_block_{i}",
                 int(512 * alpha),
                 int(512 * alpha),
                 layout=layout,
diff --git a/python/tvm/relay/testing/py_converter.py b/python/tvm/relay/testing/py_converter.py
index 44489aa9cf..9cbfcead47 100644
--- a/python/tvm/relay/testing/py_converter.py
+++ b/python/tvm/relay/testing/py_converter.py
@@ -133,13 +133,13 @@ class PythonConverter(ExprFunctor):
 
     def generate_var_name(self, name_hint: str) -> str:
         """Generates a unique variable name starting from the hint."""
-        name = "{}_var_{}".format(self.sanitize(name_hint), self.var_no)
+        name = f"{self.sanitize(name_hint)}_var_{self.var_no}"
         self.var_no += 1
         return name
 
     def generate_function_name(self, name_hint: str) -> str:
         """Generates a unique function name starting from the hint."""
-        name = "{}_fun_{}".format(self.sanitize(name_hint), self.fun_no)
+        name = f"{self.sanitize(name_hint)}_fun_{self.fun_no}"
         self.fun_no += 1
         return name
 
@@ -261,11 +261,7 @@ class PythonConverter(ExprFunctor):
             arguments = ast.arguments(inner_args, None, [], [], None, [])
 
         return ast.FunctionDef(
-            func_name,
-            arguments,
-            body,
-            decorator_list if register_packed else [],
-            None,
+            func_name, arguments, body, decorator_list if register_packed else [], None
         )
 
     def create_tuple(self, fields):
@@ -285,7 +281,7 @@ class PythonConverter(ExprFunctor):
         # compile the function and register globally
         cc_key = te_compiler.CCacheKey(op, self.tgt)
         func_hash = tvm.ir.structural_hash(op)
-        op_name = "_lowered_op_{}".format(func_hash)
+        op_name = f"_lowered_op_{func_hash}"
         if not tvm.get_global_func(op_name, allow_missing=True):
             jitted = self.tec.jit(cc_key, self.tgt)
             tvm.register_func(op_name, jitted)
@@ -334,8 +330,8 @@ class PythonConverter(ExprFunctor):
 
         # create a function to wrap the call of the lowered op and return
         # a call to that function
-        wrap_name = self.generate_function_name("_{}_wrapper".format(op_name))
-        wrap_args = [self.generate_var_name("_arg_{}".format(i)) for i in range(len(py_args))]
+        wrap_name = self.generate_function_name(f"_{op_name}_wrapper")
+        wrap_args = [self.generate_var_name(f"_arg_{i}") for i in range(len(py_args))]
 
         inner_call_args = []
         for i in range(len(py_args)):
@@ -588,10 +584,7 @@ class PythonConverter(ExprFunctor):
             [],
             ref_defs
             + val_defs
-            + [
-                Assign([ast.Attribute(ref, "value", Store())], val),
-                Return(self.create_tuple([])),
-            ],
+            + [Assign([ast.Attribute(ref, "value", Store())], val), Return(self.create_tuple([]))],
         )
         return (self.create_call(thunk_name, []), [thunk])
 
diff --git a/python/tvm/relay/testing/resnet.py b/python/tvm/relay/testing/resnet.py
index b35e01f677..e1e4069f54 100644
--- a/python/tvm/relay/testing/resnet.py
+++ b/python/tvm/relay/testing/resnet.py
@@ -239,7 +239,7 @@ def resnet(
             filter_list[i + 1],
             (1 if i == 0 else 2, 1 if i == 0 else 2),
             False,
-            name="stage%d_unit%d" % (i + 1, 1),
+            name=f"stage{i + 1}_unit1",
             bottle_neck=bottle_neck,
             data_layout=data_layout,
             kernel_layout=kernel_layout,
@@ -250,7 +250,7 @@ def resnet(
                 filter_list[i + 1],
                 (1, 1),
                 True,
-                name="stage%d_unit%d" % (i + 1, j + 2),
+                name=f"stage{i + 1}_unit{j + 2}",
                 bottle_neck=bottle_neck,
                 data_layout=data_layout,
                 kernel_layout=kernel_layout,
@@ -293,7 +293,7 @@ def get_net(
             filter_list = [16, 16, 32, 64]
             bottle_neck = False
         else:
-            raise ValueError("no experiments done on num_layers {}".format(num_layers))
+            raise ValueError(f"no experiments done on num_layers {num_layers}")
         units = per_unit * num_stages
     else:
         if num_layers >= 50:
@@ -318,7 +318,7 @@ def get_net(
         elif num_layers == 269:
             units = [3, 30, 48, 8]
         else:
-            raise ValueError("no experiments done on num_layers {}".format(num_layers))
+            raise ValueError(f"no experiments done on num_layers {num_layers}")
 
     return resnet(
         units=units,
diff --git a/python/tvm/relay/testing/resnet_3d.py b/python/tvm/relay/testing/resnet_3d.py
index 715e3951b8..b20833402a 100644
--- a/python/tvm/relay/testing/resnet_3d.py
+++ b/python/tvm/relay/testing/resnet_3d.py
@@ -233,7 +233,7 @@ def resnet(
             filter_list[i + 1],
             (1 if i == 0 else 2, 1 if i == 0 else 2, 1 if i == 0 else 2),
             False,
-            name="stage%d_unit%d" % (i + 1, 1),
+            name=f"stage{i + 1}_unit1",
             bottle_neck=bottle_neck,
             data_layout=data_layout,
             kernel_layout=kernel_layout,
@@ -244,7 +244,7 @@ def resnet(
                 filter_list[i + 1],
                 (1, 1, 1),
                 True,
-                name="stage%d_unit%d" % (i + 1, j + 2),
+                name=f"stage{i + 1}_unit{j + 2}",
                 bottle_neck=bottle_neck,
                 data_layout=data_layout,
                 kernel_layout=kernel_layout,
@@ -288,7 +288,7 @@ def get_net(
             filter_list = [16, 16, 32, 64]
             bottle_neck = False
         else:
-            raise ValueError("no experiments done on num_layers {}".format(num_layers))
+            raise ValueError(f"no experiments done on num_layers {num_layers}")
         units = per_unit * num_stages
     else:
         if num_layers >= 50:
@@ -313,7 +313,7 @@ def get_net(
         elif num_layers == 269:
             units = [3, 30, 48, 8]
         else:
-            raise ValueError("no experiments done on num_layers {}".format(num_layers))
+            raise ValueError(f"no experiments done on num_layers {num_layers}")
 
     return resnet(
         units=units,
diff --git a/python/tvm/relay/testing/squeezenet.py b/python/tvm/relay/testing/squeezenet.py
index 097f2230af..ce918fd879 100644
--- a/python/tvm/relay/testing/squeezenet.py
+++ b/python/tvm/relay/testing/squeezenet.py
@@ -32,10 +32,10 @@ from . import layers
 
 # Helpers
 def _make_fire(net, squeeze_channels, expand1x1_channels, expand3x3_channels, prefix):
-    net = _make_fire_conv(net, squeeze_channels, 1, 0, "%s_input" % prefix)
+    net = _make_fire_conv(net, squeeze_channels, 1, 0, f"{prefix}_input")
 
-    left = _make_fire_conv(net, expand1x1_channels, 1, 0, "%s_left" % prefix)
-    right = _make_fire_conv(net, expand3x3_channels, 3, 1, "%s_right" % prefix)
+    left = _make_fire_conv(net, expand1x1_channels, 1, 0, f"{prefix}_left")
+    right = _make_fire_conv(net, expand3x3_channels, 3, 1, f"{prefix}_right")
     # NOTE : Assume NCHW layout here
     net = relay.concatenate((left, right), axis=1)
     return net
@@ -47,9 +47,9 @@ def _make_fire_conv(net, channels, kernel_size, padding=0, prefix=""):
         channels=channels,
         kernel_size=(kernel_size, kernel_size),
         padding=(padding, padding),
-        name="%s_conv" % prefix,
+        name=f"{prefix}_conv",
     )
-    net = relay.nn.bias_add(net, relay.var("%s_conv_bias" % prefix))
+    net = relay.nn.bias_add(net, relay.var(f"{prefix}_conv_bias"))
     net = relay.nn.relu(net)
     return net
 
@@ -72,10 +72,9 @@ def get_net(batch_size, image_shape, num_classes, version, dtype):
     version : str, optional
         "1.0" or "1.1" of SqueezeNet
     """
-    assert version in [
-        "1.0",
-        "1.1",
-    ], "Unsupported SqueezeNet version {version}:" "1.0 or 1.1 
expected".format(version=version)
+    assert version in ["1.0", "1.1"], (
+        f"Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected"
+    )
     data_shape = (batch_size,) + image_shape
     net = relay.var("data", shape=data_shape, dtype=dtype)
     if version == "1.0":
diff --git a/python/tvm/relay/testing/tf.py b/python/tvm/relay/testing/tf.py
index e09111a205..158de22eea 100644
--- a/python/tvm/relay/testing/tf.py
+++ b/python/tvm/relay/testing/tf.py
@@ -109,9 +109,9 @@ def vmobj_to_list(o):
         elif "tensor" in o.constructor.name_hint:
             result = [o.fields[0].numpy()]
         else:
-            raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
+            raise RuntimeError(f"Unknown object type: {o.constructor.name_hint}")
     else:
-        raise RuntimeError("Unknown object type: %s" % type(o))
+        raise RuntimeError(f"Unknown object type: {type(o)}")
     return result
 
 
@@ -134,9 +134,7 @@ def AddShapesToGraphDef(session, out_node):
     """
 
     graph_def = tf_compat_v1.graph_util.convert_variables_to_constants(
-        session,
-        session.graph.as_graph_def(add_shapes=True),
-        convert_to_list(out_node),
+        session, session.graph.as_graph_def(add_shapes=True), convert_to_list(out_node)
     )
     return graph_def
 
diff --git a/python/tvm/relay/testing/tflite.py b/python/tvm/relay/testing/tflite.py
index b698b004b4..df9c0bcadf 100644
--- a/python/tvm/relay/testing/tflite.py
+++ b/python/tvm/relay/testing/tflite.py
@@ -56,7 +56,7 @@ class TFLiteModel:
             elif activation == "NONE":
                 pass
             else:
-                assert False, "Unsupported activation {}".format(activation)
+                assert False, f"Unsupported activation {activation}"
             return op
 
         return conv2d_single_function
diff --git a/python/tvm/relay/testing/vgg.py b/python/tvm/relay/testing/vgg.py
index b14c069ed0..426cd9e608 100644
--- a/python/tvm/relay/testing/vgg.py
+++ b/python/tvm/relay/testing/vgg.py
@@ -34,14 +34,14 @@ def get_feature(internal_layer, layers, filters, batch_norm=False):
                 kernel_size=(3, 3),
                 padding=(1, 1),
                 channels=filters[i],
-                name="conv%s_%s" % (i + 1, j + 1),
+                name=f"conv{i + 1}_{j + 1}",
             )
             internal_layer = relay.nn.bias_add(
-                internal_layer, relay.var("conv%s_%s_bias" % (i + 1, j + 1))
+                internal_layer, relay.var(f"conv{i + 1}_{j + 1}_bias")
             )
             if batch_norm:
                 internal_layer = wrapper.batch_norm_infer(
-                    data=internal_layer, name="bn%s_%s" % (i + 1, j + 1)
+                    data=internal_layer, name=f"bn{i + 1}_{j + 1}"
                 )
             internal_layer = relay.nn.relu(data=internal_layer)
         internal_layer = relay.nn.max_pool2d(data=internal_layer, pool_size=(2, 2), strides=(2, 2))
@@ -90,7 +90,7 @@ def get_net(batch_size, image_shape, num_classes, dtype, num_layers=11, batch_no
         19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512]),
     }
     if num_layers not in vgg_spec:
-        raise ValueError("Invalide num_layers {}. Choices are 
11,13,16,19.".format(num_layers))
+        raise ValueError(f"Invalid num_layers {num_layers}. Choices are 
11,13,16,19.")
     layers, filters = vgg_spec[num_layers]
     data_shape = (batch_size,) + image_shape
     data = relay.var("data", shape=data_shape, dtype=dtype)
diff --git a/python/tvm/relay/transform/fake_quantization_to_integer.py b/python/tvm/relay/transform/fake_quantization_to_integer.py
index 7375a4f3c0..82255c5663 100644
--- a/python/tvm/relay/transform/fake_quantization_to_integer.py
+++ b/python/tvm/relay/transform/fake_quantization_to_integer.py
@@ -219,13 +219,7 @@ def bias_add(expr, type_map):
             and tvm.ir.structural_equal(x_t.dtype, b_t.dtype)
         ):
             b = relay.qnn.op.requantize(
-                b,
-                b_t.scale,
-                b_t.zero_point,
-                in_scale,
-                in_zero_point,
-                out_dtype=x_t.dtype,
-                axis=0,
+                b, b_t.scale, b_t.zero_point, in_scale, in_zero_point, out_dtype=x_t.dtype, axis=0
             )
     else:
         # If the bias is a constant, we need to quantize it
@@ -522,15 +516,13 @@ def register_binary_qnn(op_name, op):
             # addition is typically done in 32 bit).
             return [left + right, left_t]
 
-        assert (
-            len(out_t.scale.data.shape) == 0
-        ), "The output scale needs to be a scalar, but got a tensor of shape 
{}".format(
-            out_t.scale.data.shape
+        assert len(out_t.scale.data.shape) == 0, (
+            f"The output scale needs to be a scalar, but got a tensor of shape 
"
+            f"{out_t.scale.data.shape}"
         )
-        assert (
-            len(out_t.zero_point.data.shape) == 0
-        ), "The output zero point needs to be a scalar, but got a tensor of 
shape {}".format(
-            out_t.zero_point.data.shape
+        assert len(out_t.zero_point.data.shape) == 0, (
+            f"The output zero point needs to be a scalar, but got a tensor of 
shape "
+            f"{out_t.zero_point.data.shape}"
         )
 
         out = op(
@@ -601,13 +593,7 @@ def register_unary_qnn(op_name, op):
         arg = expr.args[0]
         x_t = type_map[arg]
         out_t = type_map[expr]
-        out = op(
-            arg,
-            x_t.scale,
-            x_t.zero_point,
-            out_t.scale,
-            out_t.zero_point,
-        )
+        out = op(arg, x_t.scale, x_t.zero_point, out_t.scale, out_t.zero_point)
         return [out, out_t]
 
     return register_fake_quantization_to_integer(op_name, unary)
diff --git a/python/tvm/relay/type_functor.py b/python/tvm/relay/type_functor.py
index 490464ba12..39f94aeca7 100644
--- a/python/tvm/relay/type_functor.py
+++ b/python/tvm/relay/type_functor.py
@@ -64,7 +64,7 @@ class TypeFunctor:
         elif isinstance(typ, TypeData):
             return self.visit_type_data(typ)
         else:
-            raise Exception("unhandled case: {0}".format(type(typ)))
+            raise Exception(f"unhandled case: {type(typ)}")
 
     def visit_type_var(self, _):
         raise NotImplementedError()

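For readers skimming the patch, the mechanical change is the same throughout: messages built with %-interpolation or .format() are rewritten as f-strings that produce identical output. A minimal illustrative sketch of the conversion (the variable names below are invented for the example, not taken from the patch):

    # Three equivalent ways to build the same string; the patch converts
    # the first two forms into the third.
    stage, unit = 1, 2
    via_percent = "stage%d_unit%d" % (stage, unit)      # old %-style
    via_format = "stage{}_unit{}".format(stage, unit)   # old str.format() style
    via_fstring = f"stage{stage}_unit{unit}"            # new f-string style
    assert via_percent == via_format == via_fstring == "stage1_unit2"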