This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph-ai.git


The following commit(s) were added to refs/heads/main by this push:
     new 672aeef  fix: pylint in ml (#125)
672aeef is described below

commit 672aeef06fa1e3b6ebe96838e0f2368c7ce9f3ee
Author: SoJ <[email protected]>
AuthorDate: Fri Dec 6 13:00:00 2024 +0800

    fix: pylint in ml (#125)
---
 .../src/hugegraph_ml/data/hugegraph2dgl.py         |  1 +
 .../src/hugegraph_ml/examples/bgnn_example.py      |  4 +-
 .../src/hugegraph_ml/examples/bgrl_example.py      |  2 +-
 .../src/hugegraph_ml/examples/care_gnn_example.py  |  3 --
 .../examples/correct_and_smooth_example.py         |  4 +-
 .../src/hugegraph_ml/examples/gatne_example.py     |  2 +-
 hugegraph-ml/src/hugegraph_ml/models/arma.py       |  6 +--
 hugegraph-ml/src/hugegraph_ml/models/bgnn.py       | 11 +-----
 hugegraph-ml/src/hugegraph_ml/models/bgrl.py       |  9 +----
 hugegraph-ml/src/hugegraph_ml/models/care_gnn.py   | 12 +++---
 .../src/hugegraph_ml/models/cluster_gcn.py         |  5 ++-
 .../src/hugegraph_ml/models/correct_and_smooth.py  |  2 +-
 hugegraph-ml/src/hugegraph_ml/models/dagnn.py      |  2 +-
 hugegraph-ml/src/hugegraph_ml/models/deepergcn.py  |  8 ++--
 hugegraph-ml/src/hugegraph_ml/models/gatne.py      | 17 +++-----
 hugegraph-ml/src/hugegraph_ml/models/pgnn.py       | 25 ++++++------
 hugegraph-ml/src/hugegraph_ml/models/seal.py       | 45 +++++++++-------------
 .../hugegraph_ml/tasks/fraud_detector_caregnn.py   | 13 +------
 .../tasks/hetero_sample_embed_gatne.py             |  7 ++--
 .../src/hugegraph_ml/tasks/link_prediction_pgnn.py | 14 +++----
 .../src/hugegraph_ml/tasks/link_prediction_seal.py | 35 ++++-------------
 .../tasks/node_classify_with_sample.py             |  3 +-
 .../src/hugegraph_ml/utils/dgl2hugegraph_utils.py  |  6 +--
 23 files changed, 89 insertions(+), 147 deletions(-)

diff --git a/hugegraph-ml/src/hugegraph_ml/data/hugegraph2dgl.py b/hugegraph-ml/src/hugegraph_ml/data/hugegraph2dgl.py
index 92ea00c..14f156d 100644
--- a/hugegraph-ml/src/hugegraph_ml/data/hugegraph2dgl.py
+++ b/hugegraph-ml/src/hugegraph_ml/data/hugegraph2dgl.py
@@ -16,6 +16,7 @@
 # under the License.
 
 # pylint: disable=too-many-branches
+# pylint: disable=C0304
 
 import warnings
 from typing import Optional, List
diff --git a/hugegraph-ml/src/hugegraph_ml/examples/bgnn_example.py b/hugegraph-ml/src/hugegraph_ml/examples/bgnn_example.py
index 7c353f3..c395a1d 100644
--- a/hugegraph-ml/src/hugegraph_ml/examples/bgnn_example.py
+++ b/hugegraph-ml/src/hugegraph_ml/examples/bgnn_example.py
@@ -15,6 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 
+# pylint: disable=C0103
+
 from hugegraph_ml.models.bgnn import (
     GNNModelDGL,
     BGNNPredictor,
@@ -48,7 +50,7 @@ def bgnn_example():
         gbdt_depth=6,
         gbdt_lr=0.1,
     )
-    metrics = bgnn.fit(
+    _ = bgnn.fit(
         g,
         encoded_X,
         y,
diff --git a/hugegraph-ml/src/hugegraph_ml/examples/bgrl_example.py b/hugegraph-ml/src/hugegraph_ml/examples/bgrl_example.py
index 77c03f9..f73ab0c 100644
--- a/hugegraph-ml/src/hugegraph_ml/examples/bgrl_example.py
+++ b/hugegraph-ml/src/hugegraph_ml/examples/bgrl_example.py
@@ -16,7 +16,7 @@
 # under the License.
 
 from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
-from hugegraph_ml.models.bgrl import BGRL, GCN, MLP_Predictor, CosineDecayScheduler, get_graph_drop_transform
+from hugegraph_ml.models.bgrl import BGRL, GCN, MLP_Predictor
 from hugegraph_ml.models.mlp import MLPClassifier
 from hugegraph_ml.tasks.node_classify import NodeClassify
 from hugegraph_ml.tasks.node_embed import NodeEmbed
diff --git a/hugegraph-ml/src/hugegraph_ml/examples/care_gnn_example.py b/hugegraph-ml/src/hugegraph_ml/examples/care_gnn_example.py
index e6fb52f..723d37d 100644
--- a/hugegraph-ml/src/hugegraph_ml/examples/care_gnn_example.py
+++ b/hugegraph-ml/src/hugegraph_ml/examples/care_gnn_example.py
@@ -18,11 +18,8 @@
 from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
 from hugegraph_ml.models.care_gnn import CAREGNN
 from hugegraph_ml.tasks.fraud_detector_caregnn import DetectorCaregnn
-
-import dgl
 import torch
 
-
 def care_gnn_example(n_epochs=200):
     hg2d = HugeGraph2DGL()
     graph = hg2d.convert_hetero_graph(
diff --git a/hugegraph-ml/src/hugegraph_ml/examples/correct_and_smooth_example.py b/hugegraph-ml/src/hugegraph_ml/examples/correct_and_smooth_example.py
index aa09808..921fa94 100644
--- a/hugegraph-ml/src/hugegraph_ml/examples/correct_and_smooth_example.py
+++ b/hugegraph-ml/src/hugegraph_ml/examples/correct_and_smooth_example.py
@@ -16,10 +16,8 @@
 # under the License.
 
 from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
-from hugegraph_ml.models.correct_and_smooth import MLP, MLPLinear
+from hugegraph_ml.models.correct_and_smooth import MLP
 from hugegraph_ml.tasks.node_classify import NodeClassify
-import argparse
-
 
 def cs_example(n_epochs=200):
     hg2d = HugeGraph2DGL()
diff --git a/hugegraph-ml/src/hugegraph_ml/examples/gatne_example.py b/hugegraph-ml/src/hugegraph_ml/examples/gatne_example.py
index 6a994f0..a92abd1 100644
--- a/hugegraph-ml/src/hugegraph_ml/examples/gatne_example.py
+++ b/hugegraph-ml/src/hugegraph_ml/examples/gatne_example.py
@@ -17,7 +17,7 @@
 
 
 from hugegraph_ml.data.hugegraph2dgl import HugeGraph2DGL
-from hugegraph_ml.models.gatne import DGLGATNE, NeighborSampler
+from hugegraph_ml.models.gatne import DGLGATNE
 from hugegraph_ml.tasks.hetero_sample_embed_gatne import HeteroSampleEmbedGATNE
 
 
diff --git a/hugegraph-ml/src/hugegraph_ml/models/arma.py b/hugegraph-ml/src/hugegraph_ml/models/arma.py
index 76ca096..dd8dc0f 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/arma.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/arma.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.right (c) 2024 by jinsong, All Rights Reserved.
 
-# pylint: disable=E1101
+# pylint: disable=E1101,C0103
 
 """
 auto-regressive moving average (ARMA)
@@ -28,11 +28,9 @@ DGL code: https://github.com/dmlc/dgl/tree/master/examples/pytorch/arma
 """
 
 import math
-
 import dgl.function as fn
-
 import torch
-import torch.nn as nn
+from torch import nn
 import torch.nn.functional as F
 
 
diff --git a/hugegraph-ml/src/hugegraph_ml/models/bgnn.py b/hugegraph-ml/src/hugegraph_ml/models/bgnn.py
index b137043..8015af8 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/bgnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/bgnn.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1102,E0401,E0711,E0606,E0602
+# pylint: disable=E1102,E0401,E0711,E0606,E0602,C0103,C0206,W0612,C0209,R1705,C0200,R1735,W0201
 
 """
 Boost-GNN (BGNN)
@@ -335,14 +335,7 @@ class BGNNPredictor:
         train_metric, val_metric, test_metric = metrics[metric_name][-1]
         if epoch and epoch % logging_epochs == 0:
             pbar.set_description(
-                "Epoch {:05d} | Loss {:.3f} | Loss {:.3f}/{:.3f}/{:.3f} | Time {:.4f}".format(
-                    epoch,
-                    loss,
-                    train_metric,
-                    val_metric,
-                    test_metric,
-                    epoch_time,
-                )
+                f"Epoch {epoch:05d} | Loss {loss:.3f} | Loss {train_metric:.3f}/{val_metric:.3f}/{test_metric:.3f} | Time {epoch_time:.4f}"
             )
 
     def fit(
diff --git a/hugegraph-ml/src/hugegraph_ml/models/bgrl.py b/hugegraph-ml/src/hugegraph_ml/models/bgrl.py
index ed87a43..60b01a0 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/bgrl.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/bgrl.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.right (c) 2024 by jinsong, All Rights Reserved.
 
-# pylint: disable=E1102
+# pylint: disable=E1102,C0103,R1705,R1734
 
 """
 Bootstrapped Graph Latents (BGRL)
@@ -143,7 +143,6 @@ class BGRL(nn.Module):
         target_x = transform_2(graph)
         online_x, target_x = dgl.add_self_loop(online_x), dgl.add_self_loop(target_x)
         online_feats, target_feats = online_x.ndata["feat"], target_x.ndata["feat"]
-        
         # forward online network
         online_y1 = self.online_encoder(online_x, online_feats)
         # prediction
@@ -151,7 +150,6 @@ class BGRL(nn.Module):
         # forward target network
         with torch.no_grad():
             target_y1 = self.target_encoder(target_x, target_feats).detach()
-            
         # forward online network 2
         online_y2 = self.online_encoder(target_x, target_feats)
         # prediction
@@ -159,7 +157,6 @@ class BGRL(nn.Module):
         # forward target network
         with torch.no_grad():
             target_y2 = self.target_encoder(online_x, online_feats).detach()
-        
         loss = (
             2
             - cosine_similarity(online_q1, target_y1.detach(), dim=-1).mean()
@@ -238,9 +235,7 @@ class CosineDecayScheduler:
             )
         else:
             raise ValueError(
-                "Step ({}) > total number of steps ({}).".format(
-                    step, self.total_steps
-                )
+                f"Step ({step}) > total number of steps ({self.total_steps})."
             )
 
 def get_graph_drop_transform(drop_edge_p, feat_mask_p):
diff --git a/hugegraph-ml/src/hugegraph_ml/models/care_gnn.py b/hugegraph-ml/src/hugegraph_ml/models/care_gnn.py
index 84e850f..994513e 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/care_gnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/care_gnn.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1101
+# pylint: disable=E1101,C0103
 
 """
 CAmouflage-REsistant GNN (CARE-GNN)
@@ -30,7 +30,7 @@ DGL code: https://github.com/dmlc/dgl/tree/master/examples/pytorch/caregnn
 import dgl.function as fn
 import numpy as np
 import torch as th
-import torch.nn as nn
+from torch import nn
 
 
 class CAREConv(nn.Module):
@@ -100,7 +100,7 @@ class CAREConv(nn.Module):
             g.ndata["h"] = feat
 
             hr = {}
-            for i, etype in enumerate(g.canonical_etypes):
+            for _, etype in enumerate(g.canonical_etypes):
                 g.apply_edges(self._calc_distance, etype=etype)
                 self.dist[etype] = g.edges[etype].data["d"]
                 sampled_edges = self._top_p_sampling(g[etype], self.p[etype])
@@ -109,10 +109,10 @@ class CAREConv(nn.Module):
                 g.send_and_recv(
                     sampled_edges,
                     fn.copy_u("h", "m"),
-                    fn.mean("m", "h_%s" % etype[1]),
+                    fn.mean("m", f"h_{etype[1]}"),
                     etype=etype,
                 )
-                hr[etype] = g.ndata["h_%s" % etype[1]]
+                hr[etype] = g.ndata[f"h_{etype[1]}"]
                 if self.activation is not None:
                     hr[etype] = self.activation(hr[etype])
 
@@ -175,7 +175,7 @@ class CAREGNN(nn.Module):
             )
 
             # Hidden layers with n - 2 layers
-            for i in range(self.num_layers - 2):
+            for _ in range(self.num_layers - 2):
                 self.layers.append(
                     CAREConv(
                         self.hid_dim,
diff --git a/hugegraph-ml/src/hugegraph_ml/models/cluster_gcn.py b/hugegraph-ml/src/hugegraph_ml/models/cluster_gcn.py
index 09fd04a..231e2bb 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/cluster_gcn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/cluster_gcn.py
@@ -27,10 +27,11 @@ Author's code: https://github.com/google-research/google-research/tree/master/cl
 DGL code: https://github.com/dmlc/dgl/tree/master/examples/pytorch/cluster_gcn
 """
 
-import dgl.nn as dglnn
-import torch.nn as nn
+from torch import nn
 import torch.nn.functional as F
 
+import dgl.nn as dglnn
+
 class SAGE(nn.Module):
     def __init__(self, in_feats, n_hidden, n_classes):
         super().__init__()
diff --git a/hugegraph-ml/src/hugegraph_ml/models/correct_and_smooth.py b/hugegraph-ml/src/hugegraph_ml/models/correct_and_smooth.py
index 70b8f9e..50481c6 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/correct_and_smooth.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/correct_and_smooth.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1101,E1102
+# pylint: disable=E1101,E1102,W0223,R1705
 
 """
  Correct and Smooth (C&S)
diff --git a/hugegraph-ml/src/hugegraph_ml/models/dagnn.py b/hugegraph-ml/src/hugegraph_ml/models/dagnn.py
index 8afd8db..77cb1ce 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/dagnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/dagnn.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1101
+# pylint: disable=E1101,C0103
 
 """
 Deep Adaptive Graph Neural Network (DAGNN)
diff --git a/hugegraph-ml/src/hugegraph_ml/models/deepergcn.py b/hugegraph-ml/src/hugegraph_ml/models/deepergcn.py
index 0781ded..26b41fc 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/deepergcn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/deepergcn.py
@@ -27,12 +27,12 @@ Author's code: https://github.com/lightaime/deep_gcns_torch
 DGL code: https://github.com/dmlc/dgl/tree/master/examples/pytorch/deepergcn
 """
 
-import dgl.function as fn
-import torch.nn as nn
+import torch
+from torch import nn
 import torch.nn.functional as F
+
+import dgl.function as fn
 from dgl.nn.functional import edge_softmax
-from dgl.nn.pytorch.glob import AvgPooling
-import torch
 
 # pylint: disable=E1101,E0401
 
diff --git a/hugegraph-ml/src/hugegraph_ml/models/gatne.py b/hugegraph-ml/src/hugegraph_ml/models/gatne.py
index b0fe329..69cac2d 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/gatne.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/gatne.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.right (c) 2024 by jinsong, All Rights Reserved.
 
-# pylint: disable=E1101
+# pylint: disable=E1101,R0205,C0200,R1732
 
 """
 General Attributed Multiplex HeTerogeneous Network Embedding (GATNE)
@@ -28,24 +28,19 @@ DGL code: https://github.com/dmlc/dgl/tree/master/examples/pytorch/GATNE-T
 """
 
 import math
-import os
-import sys
 import time
-from collections import defaultdict
+import multiprocessing
+from functools import partial, reduce
 
 import numpy as np
+
 import torch
 from torch import nn
 import torch.nn.functional as F
-from numpy import random
 from torch.nn.parameter import Parameter
-from tqdm.auto import tqdm
 
 import dgl
 import dgl.function as fn
-import multiprocessing
-from functools import partial, reduce, wraps
-
 
 class NeighborSampler(object):
     def __init__(self, g, num_fanouts):
@@ -232,7 +227,7 @@ def generate_pairs(all_walks, window_size, num_workers):
     # for each node, choose the first neighbor and second neighbor of it to form pairs
     # Get all worker processes
     start_time = time.time()
-    print("We are generating pairs with {} cores.".format(num_workers))
+    print(f"We are generating pairs with {num_workers} cores.")
 
     # Start all worker processes
     pool = multiprocessing.Pool(processes=num_workers)
@@ -259,7 +254,7 @@ def generate_pairs(all_walks, window_size, num_workers):
 
     pool.close()
     end_time = time.time()
-    print("Generate pairs end, use {}s.".format(end_time - start_time))
+    print(f"Generate pairs end, use {end_time - start_time}s.")
     return np.array([list(pair) for pair in set(pairs)])
 
 
diff --git a/hugegraph-ml/src/hugegraph_ml/models/pgnn.py b/hugegraph-ml/src/hugegraph_ml/models/pgnn.py
index 1c6dcb5..8f51d0c 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/pgnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/pgnn.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E1101,E0401
+# pylint: disable=E1101,E0401,C0103,R1732,C0200,R1705
 
 """
 Position-aware Graph Neural Networks (P-GNN)
@@ -27,19 +27,20 @@ Author's code: https://github.com/JiaxuanYou/P-GNN
 DGL code: https://github.com/dmlc/dgl/tree/master/examples/pytorch/P-GNN
 """
 
-import dgl.function as fn
-import torch
-from torch import nn
-import torch.nn.functional as F
 import multiprocessing as mp
 import random
 from multiprocessing import get_context
 
+import torch
+from torch import nn
+import torch.nn.functional as F
+
 import networkx as nx
 import numpy as np
 from tqdm.auto import tqdm
 from sklearn.metrics import roc_auc_score
 
+import dgl.function as fn
 
 class PGNN_layer(nn.Module):
     def __init__(self, input_dim, output_dim):
@@ -162,9 +163,9 @@ def split_edges(p, edges, data, non_train_ratio=0.2):
 
     data.update(
         {
-            "{}_edges_train".format(p): edges[:, :split1],  # 80%
-            "{}_edges_val".format(p): edges[:, split1:split2],  # 10%
-            "{}_edges_test".format(p): edges[:, split2:],  # 10%
+            f"{p}_edges_train": edges[:, :split1],  # 80%
+            f"{p}_edges_val": edges[:, split1:split2],  # 10%
+            f"{p}_edges_test": edges[:, split2:],  # 10%
         }
     )
 
@@ -393,8 +394,8 @@ def preselect_anchor(data, num_workers=4):
 def get_loss(p, data, out, loss_func, device, get_auc=True):
     edge_mask = np.concatenate(
         (
-            data["positive_edges_{}".format(p)],
-            data["negative_edges_{}".format(p)],
+            data[f"positive_edges_{p}"],
+            data[f"negative_edges_{p}"],
         ),
         axis=-1,
     )
@@ -410,13 +411,13 @@ def get_loss(p, data, out, loss_func, device, get_auc=True):
 
     label_positive = torch.ones(
         [
-            data["positive_edges_{}".format(p)].shape[1],
+            data[f"positive_edges_{p}"].shape[1],
         ],
         dtype=pred.dtype,
     )
     label_negative = torch.zeros(
         [
-            data["negative_edges_{}".format(p)].shape[1],
+            data[f"negative_edges_{p}"].shape[1],
         ],
         dtype=pred.dtype,
     )
diff --git a/hugegraph-ml/src/hugegraph_ml/models/seal.py b/hugegraph-ml/src/hugegraph_ml/models/seal.py
index 3c167c5..43d9a89 100644
--- a/hugegraph-ml/src/hugegraph_ml/models/seal.py
+++ b/hugegraph-ml/src/hugegraph_ml/models/seal.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E0401
+# pylint: disable=E0401,R1719,C0103,R0205,R1721,R1705
 
 """
 SEAL
@@ -27,33 +27,28 @@ Author's code: https://github.com/muhanzhang/SEAL
 DGL code: https://github.com/dmlc/dgl/tree/master/examples/pytorch/seal
 """
 
+import argparse
+import os.path as osp
+from copy import deepcopy
+import logging
+import os
+import time
+
 import torch
-import torch.nn as nn
+from torch import nn
 import torch.nn.functional as F
-
-from dgl.nn.pytorch import GraphConv, SAGEConv, SortPooling, SumPooling
-import argparse
+from torch.utils.data import DataLoader, Dataset
 
 import dgl
+from dgl import add_self_loop, NID
+from dgl.dataloading.negative_sampler import Uniform
+from dgl.nn.pytorch import GraphConv, SAGEConv, SortPooling, SumPooling
 
 import numpy as np
-import pandas as pd
 from ogb.linkproppred import DglLinkPropPredDataset, Evaluator
 from scipy.sparse.csgraph import shortest_path
-
-import os.path as osp
-from copy import deepcopy
-
-from dgl import add_self_loop, DGLGraph, NID
-from dgl.dataloading.negative_sampler import Uniform
-from torch.utils.data import DataLoader, Dataset
 from tqdm import tqdm
 
-import logging
-import os
-import time
-
-
 class GCN(nn.Module):
     """
     GCN Model
@@ -581,7 +576,7 @@ class SEALSampler(object):
         sample_nodes = [target_nodes]
         frontiers = target_nodes
 
-        for i in range(self.hop):
+        for _ in range(self.hop):
             frontiers = self.graph.out_edges(frontiers)[1]
             frontiers = torch.unique(frontiers)
             sample_nodes.append(frontiers)
@@ -622,7 +617,7 @@ class SEALSampler(object):
         subgraph_list = []
         labels_list = []
         edge_dataset = EdgeDataSet(edges, labels, transform=self.sample_subgraph)
-        self.print_fn("Using {} workers in sampling job.".format(self.num_workers))
+        self.print_fn(f"Using {self.num_workers} workers in sampling job.")
         sampler = DataLoader(
             edge_dataset,
             batch_size=32,
@@ -709,9 +704,7 @@ class SEALData(object):
 
         path = osp.join(
             self.save_dir or "",
-            "{}_{}_{}-hop_{}-subsample.bin".format(
-                self.prefix, split_type, self.hop, subsample_ratio
-            ),
+            f"{self.prefix}_{split_type}_{self.hop}-hop_{subsample_ratio}-subsample.bin"
         )
 
         if osp.exists(path):
@@ -720,15 +713,15 @@ class SEALData(object):
             dataset = GraphDataSet(graph_list, data["labels"])
 
         else:
-            self.print_fn("Processed {} files not exist.".format(split_type))
+            self.print_fn(f"Processed {split_type} files not exist.")
 
             edges, labels = self.generator(split_type)
-            self.print_fn("Generate {} edges totally.".format(edges.size(0)))
+            self.print_fn(f"Generate {edges.size(0)} edges totally.")
 
             graph_list, labels = self.sampler(edges, labels)
             dataset = GraphDataSet(graph_list, labels)
             dgl.save_graphs(path, graph_list, {"labels": labels})
-            self.print_fn("Save preprocessed subgraph to {}".format(path))
+            self.print_fn(f"Save preprocessed subgraph to {path}")
         return dataset
 
 
diff --git a/hugegraph-ml/src/hugegraph_ml/tasks/fraud_detector_caregnn.py b/hugegraph-ml/src/hugegraph_ml/tasks/fraud_detector_caregnn.py
index 632cfe3..53518a6 100644
--- a/hugegraph-ml/src/hugegraph_ml/tasks/fraud_detector_caregnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/tasks/fraud_detector_caregnn.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# pylint: disable=E0401
+# pylint: disable=E0401,C0301
 
 import torch
 from torch import nn
@@ -51,7 +51,6 @@ class DetectorCaregnn:
             torch.nonzero(train_mask, as_tuple=False).squeeze(1).to(self._device)
         )
         val_idx = torch.nonzero(val_mask, as_tuple=False).squeeze(1).to(self._device)
-        test_idx = torch.nonzero(test_mask, as_tuple=False).squeeze(1).to(self._device)
         rl_idx = torch.nonzero(
             train_mask.to(self._device) & labels.bool(), as_tuple=False
         ).squeeze(1)
@@ -89,15 +88,7 @@ class DetectorCaregnn:
             tr_loss.backward()
             optimizer.step()
             print(
-                "Epoch {}, Train: Recall: {:.4f} AUC: {:.4f} Loss: {:.4f} | Val: Recall: {:.4f} AUC: {:.4f} Loss: {:.4f}".format(
-                    epoch,
-                    tr_recall,
-                    tr_auc,
-                    tr_loss.item(),
-                    val_recall,
-                    val_auc,
-                    val_loss.item(),
-                )
+                f"Epoch {epoch}, Train: Recall: {tr_recall:.4f} AUC: {tr_auc:.4f} Loss: {tr_loss.item():.4f} | Val: Recall: {val_recall:.4f} AUC: {val_auc:.4f} Loss: {val_loss.item():.4f}"
             )
         self._model.RLModule(self.graph, epoch, rl_idx)
 
diff --git a/hugegraph-ml/src/hugegraph_ml/tasks/hetero_sample_embed_gatne.py b/hugegraph-ml/src/hugegraph_ml/tasks/hetero_sample_embed_gatne.py
index 62af5d2..91d00ac 100644
--- a/hugegraph-ml/src/hugegraph_ml/tasks/hetero_sample_embed_gatne.py
+++ b/hugegraph-ml/src/hugegraph_ml/tasks/hetero_sample_embed_gatne.py
@@ -16,11 +16,11 @@
 # under the License.
 
 
+import random
 import dgl
 import torch
 from torch import nn
 from tqdm.auto import tqdm
-import random
 from hugegraph_ml.models.gatne import (
     construct_typenodes_from_graph,
     generate_pairs,
@@ -58,7 +58,7 @@ class HeteroSampleEmbedGATNE:
         all_walks = []
         for i in range(edge_type_count):
             nodes = torch.LongTensor(type_nodes[i] * num_walks).to(self._device)
-            traces, types = dgl.sampling.random_walk(
+            traces, _ = dgl.sampling.random_walk(
                 self.graph,
                 nodes,
                 metapath=[self.graph.etypes[i]] * (neighbor_samples - 1),
@@ -84,14 +84,13 @@ class HeteroSampleEmbedGATNE:
             lr=lr,
         )
 
-        tensors = []
         for epoch in range(n_epochs):
             self._model.train()
             random.shuffle(train_pairs)
 
             data_iter = tqdm(
                 train_dataloader,
-                desc="epoch %d" % (epoch),
+                desc=f"epoch {epoch}",
                 total=(len(train_pairs) + (batch_size - 1)) // batch_size,
             )
             avg_loss = 0.0
diff --git a/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_pgnn.py b/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_pgnn.py
index 8b03cbd..13e761e 100644
--- a/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_pgnn.py
+++ b/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_pgnn.py
@@ -18,8 +18,6 @@
 import torch
 import dgl
 from torch import nn
-from tqdm import trange
-import numpy as np
 from hugegraph_ml.models.pgnn import (
     get_dataset,
     preselect_anchor,
@@ -85,10 +83,10 @@ class LinkPredictionPGNN:
             if epoch % 100 == 0:
                 print(
                     epoch,
-                    "Loss {:.4f}".format(loss_train),
-                    "Train AUC: {:.4f}".format(auc_train),
-                    "Val AUC: {:.4f}".format(auc_val),
-                    "Test AUC: {:.4f}".format(auc_test),
-                    "Best Val AUC: {:.4f}".format(best_auc_val),
-                    "Best Test AUC: {:.4f}".format(best_auc_test),
+                    f"Loss {loss_train:.4f}",
+                    f"Train AUC: {auc_train:.4f}",
+                    f"Val AUC: {auc_val:.4f}",
+                    f"Test AUC: {auc_test:.4f}",
+                    f"Best Val AUC: {best_auc_val:.4f}",
+                    f"Best Test AUC: {best_auc_test:.4f}",
                 )
diff --git a/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_seal.py b/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_seal.py
index c307cc1..1294942 100644
--- a/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_seal.py
+++ b/hugegraph-ml/src/hugegraph_ml/tasks/link_prediction_seal.py
@@ -15,20 +15,16 @@
 # specific language governing permissions and limitations
 # under the License.
 
+# pylint: disable=R1728
 
-from typing import Literal
-
+import time
 import torch
+from torch.nn import BCEWithLogitsLoss
 from dgl import DGLGraph, NID, EID
-from torch import nn
-from tqdm import tqdm
 from dgl.dataloading import GraphDataLoader
-from torch.nn import BCEWithLogitsLoss
-import time
+from tqdm import tqdm
 import numpy as np
-from hugegraph_ml.models.seal import SEALData, DGCNN, evaluate_hits
-from hugegraph_ml.utils.early_stopping import EarlyStopping
-
+from hugegraph_ml.models.seal import SEALData, evaluate_hits
 
 class LinkPredictionSeal:
     def __init__(self, graph: DGLGraph, split_edge, model):
@@ -55,8 +51,6 @@ class LinkPredictionSeal:
             num_workers=32,
             print_fn=print,
         )
-        node_attribute = seal_data.ndata["feat"]
-        edge_weight = seal_data.edata["weight"].float()
         train_data = seal_data("train")
         val_data = seal_data("valid")
         test_data = seal_data("test")
@@ -103,9 +97,7 @@ class LinkPredictionSeal:
         optimizer = torch.optim.Adam(parameters, lr=lr)
         loss_fn = BCEWithLogitsLoss()
         print(
-            "Total parameters: {}".format(
-                sum([p.numel() for p in self._model.parameters()])
-            )
+            f"Total parameters: {sum([p.numel() for p in self._model.parameters()])}"
         )
 
         # train and evaluate loop
@@ -135,16 +127,7 @@ class LinkPredictionSeal:
                 )
                 evaluate_time = time.time()
                 print(
-                    "Epoch-{}, train loss: {:.4f}, hits@{}: val-{:.4f}, test-{:.4f}, "
-                    "cost time: train-{:.1f}s, total-{:.1f}s".format(
-                        epoch,
-                        loss,
-                        50,
-                        val_metric,
-                        test_metric,
-                        train_time - start_time,
-                        evaluate_time - start_time,
-                    )
+                    f"Epoch-{epoch}, train loss: {loss:.4f}, hits@{50}: val-{val_metric:.4f}, test-{test_metric:.4f}, cost time: train-{train_time - start_time:.1f}s, total-{evaluate_time - start_time:.1f}s"
                 )
                 summary_val.append(val_metric)
                 summary_test.append(test_metric)
@@ -152,9 +135,7 @@ class LinkPredictionSeal:
 
         print("Experiment Results:")
         print(
-            "Best hits@{}: {:.4f}, epoch: {}".format(
-                50, np.max(summary_test), np.argmax(summary_test)
-            )
+            f"Best hits@{50}: {np.max(summary_test):.4f}, epoch: {np.argmax(summary_test)}"
         )
 
     @torch.no_grad()
diff --git a/hugegraph-ml/src/hugegraph_ml/tasks/node_classify_with_sample.py b/hugegraph-ml/src/hugegraph_ml/tasks/node_classify_with_sample.py
index 393cd09..c60d36c 100644
--- a/hugegraph-ml/src/hugegraph_ml/tasks/node_classify_with_sample.py
+++ b/hugegraph-ml/src/hugegraph_ml/tasks/node_classify_with_sample.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
+# pylint: disable=C0301
 
 from typing import Literal
 
@@ -140,7 +141,7 @@ class NodeClassifyWithSample:
         test_labels = []
         total_loss = 0
         with torch.no_grad():
-            for it, sg in enumerate(self.dataloader):
+            for _, sg in enumerate(self.dataloader):
                 sg_feats = feats[sg.ndata["_ID"]]
                 sg_labels = labels[sg.ndata["_ID"]]
                 sg_test_msak = test_mask[sg.ndata["_ID"]].bool()
diff --git a/hugegraph-ml/src/hugegraph_ml/utils/dgl2hugegraph_utils.py b/hugegraph-ml/src/hugegraph_ml/utils/dgl2hugegraph_utils.py
index cdc4ea3..1d652df 100644
--- a/hugegraph-ml/src/hugegraph_ml/utils/dgl2hugegraph_utils.py
+++ b/hugegraph-ml/src/hugegraph_ml/utils/dgl2hugegraph_utils.py
@@ -17,10 +17,11 @@
 
 # pylint: disable=too-many-branches
 # pylint: disable=too-many-statements
+# pylint: disable=E0401,C0302,C0103,W1514,R1735,R1734,C0206
 
 import os
 from typing import Optional
-
+import json
 import dgl
 import numpy as np
 import scipy
@@ -31,7 +32,6 @@ from dgl.data.utils import _get_dgl_url, download, load_graphs
 import networkx as nx
 from ogb.linkproppred import DglLinkPropPredDataset
 import pandas as pd
-import json
 from pyhugegraph.api.graph import GraphManager
 from pyhugegraph.api.schema import SchemaManager
 from pyhugegraph.client import PyHugeClient
@@ -381,7 +381,6 @@ def import_graph_from_nx(
     # create property schema
     # check props and create vertex label
     vertex_label = f"{dataset_name}_vertex"
-    props_value = {}
     client_schema.vertexLabel(vertex_label).useAutomaticId().ifNotExist().create()
     # add vertices for batch (note MAX_BATCH_NUM)
     idx_to_vertex_id = {}
@@ -532,7 +531,6 @@ def import_graph_from_ogb(
     else:
         raise ValueError("dataset not supported")
     graph_dgl = dataset_dgl[0]
-    split_edges = dataset_dgl.get_edge_split()
 
     client: PyHugeClient = PyHugeClient(
         ip=ip, port=port, graph=graph, user=user, pwd=pwd, graphspace=graphspace
