rezarokni commented on code in PR #23497: URL: https://github.com/apache/beam/pull/23497#discussion_r992432247
########## sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/transformations.py: ########## @@ -0,0 +1,194 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""This file contains the transformations and utility functions for +the anomaly_detection pipeline.""" +import json + +import numpy as np + +import apache_beam as beam +import config as cfg +import hdbscan +import torch +import yagmail +from apache_beam.ml.inference.base import PredictionResult +from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy +from apache_beam.ml.inference.sklearn_inference import _validate_inference_args +from transformers import AutoTokenizer +from transformers import DistilBertModel + +# [START tokenization] +Tokenizer = AutoTokenizer.from_pretrained(cfg.TOKENIZER_NAME) + + +def tokenize_sentence(input_dict): + """ + Takes a dictionary with a text and an id, tokenizes the text, and + returns a tuple of the text and id and the tokenized text + + Args: + input_dict: a dictionary with the text and id of the sentence + + Returns: + A tuple of the text and id, and a dictionary of the tokens. 
+ """ + text, uid = input_dict["text"], input_dict["id"] + tokens = Tokenizer([text], padding=True, truncation=True, return_tensors="pt") + tokens = {key: torch.squeeze(val) for key, val in tokens.items()} + return (text, uid), tokens + + +# [END tokenization] + + +# [START DistilBertModelWrapper] +class ModelWrapper(DistilBertModel): + """Wrapper to DistilBertModel to get embeddings when calling + forward function.""" + def forward(self, **kwargs): + output = super().forward(**kwargs) + sentence_embedding = ( + self.mean_pooling(output, + kwargs["attention_mask"]).detach().cpu().numpy()) + return sentence_embedding + + # Mean Pooling - Take attention mask into account for correct averaging + def mean_pooling(self, model_output, attention_mask): + """ + Calculates the mean of token embeddings + + Args: + model_output: The output of the model. + attention_mask: This is a tensor that contains 1s for all input tokens and + 0s for all padding tokens. + + Returns: + The mean of the token embeddings. + """ + token_embeddings = model_output[ + 0] # First element of model_output contains all token embeddings + input_mask_expanded = ( + attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()) + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( + input_mask_expanded.sum(1), min=1e-9) + + +# [END DistilBertModelWrapper] + + +# [START CustomSklearnModelHandlerNumpy] +class CustomSklearnModelHandlerNumpy(SklearnModelHandlerNumpy): + # Can be removed once: https://github.com/apache/beam/issues/21863 is fixed + def batch_elements_kwargs(self): + """Limit batch size to 1 for inference""" + return {"max_batch_size": 1} + + # Can be removed once: https://github.com/apache/beam/issues/22572 is fixed + def run_inference(self, batch, model, inference_args=None): + """Runs inferences on a batch of numpy arrays. + + Args: + batch: A sequence of examples as numpy arrays. They should + be single examples. + model: A numpy model or pipeline. 
Must implement predict(X). + Where the parameter X is a numpy array. + inference_args: Any additional arguments for an inference. + + Returns: + An Iterable of type PredictionResult. + """ + _validate_inference_args(inference_args) + vectorized_batch = np.vstack(batch) + predictions = hdbscan.approximate_predict(model, vectorized_batch) + return [PredictionResult(x, y) for x, y in zip(batch, predictions)] + + +# [END CustomSklearnModelHandlerNumpy] + + +class NormalizeEmbedding(beam.DoFn): + """A DoFn for normalization of text embedding.""" + def process(self, element, *args, **kwargs): + """ + For each element in the input PCollection, normalize the embedding vector, and + yield a new element with the normalized embedding added + + Args: + element: The element to be processed. + """ + (text, uid), prediction = element + embedding = prediction.inference + l2_norm = np.linalg.norm(embedding) + yield {"text": text, "id": uid, "embedding": embedding / l2_norm} + + +class Decode(beam.DoFn): Review Comment: Needs more description , below we have DecodePrediction, maybe here this can be DecodeExample or DecodeInferenceData... ########## sdks/python/apache_beam/examples/inference/anomaly_detection/write_data_to_pubsub_pipeline/main.py: ########## @@ -0,0 +1,89 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""This file contains the pipeline for writing twitter messages to PubSub.""" +import argparse +import sys + +import apache_beam as beam +import config as cfg +from apache_beam.io.gcp.pubsub import WriteToPubSub +from pipeline.options import get_pipeline_options +from pipeline.utils import AssignUniqueID +from pipeline.utils import ConvertToPubSubMessage +from pipeline.utils import get_dataset + + +def parse_arguments(argv): + """ + Parses the arguments passed to the command line and returns them as an object + + Args: + argv: The arguments passed to the command line. + + Returns: + The arguments that are being passed in. + """ + parser = argparse.ArgumentParser(description="write-to-pubsub") + + parser.add_argument( + "-m", + "--mode", + help="Mode to run pipeline in.", + choices=["local", "cloud"], + default="local", + ) + parser.add_argument( + "-p", + "--project", + help="GCP project to run pipeline on.", + default=cfg.PROJECT_ID, + ) + + args, _ = parser.parse_known_args(args=argv) + return args + + +def run(): + """ + Runs the pipeline. It loads the training data, Review Comment: Suggest making clear that this is the injector pipeline that simulates a stream? 'Runs the injector pipeline...' ########## sdks/python/apache_beam/examples/inference/anomaly_detection/write_data_to_pubsub_pipeline/config.py: ########## @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""The file defines global variables.""" +PROJECT_ID = "apache-beam-testing" Review Comment: As an example, should this value not be unset? This way the pipeline will fail with a clean error rather than an ACL error. ########## sdks/python/apache_beam/examples/inference/anomaly_detection/write_data_to_pubsub_pipeline/main.py: ########## @@ -0,0 +1,89 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# + +"""This file contains the pipeline for writing twitter messages to PubSub.""" +import argparse +import sys + +import apache_beam as beam +import config as cfg +from apache_beam.io.gcp.pubsub import WriteToPubSub +from pipeline.options import get_pipeline_options +from pipeline.utils import AssignUniqueID +from pipeline.utils import ConvertToPubSubMessage +from pipeline.utils import get_dataset + + +def parse_arguments(argv): + """ + Parses the arguments passed to the command line and returns them as an object + + Args: + argv: The arguments passed to the command line. + + Returns: + The arguments that are being passed in. + """ + parser = argparse.ArgumentParser(description="write-to-pubsub") + + parser.add_argument( + "-m", + "--mode", + help="Mode to run pipeline in.", + choices=["local", "cloud"], + default="local", + ) + parser.add_argument( + "-p", + "--project", + help="GCP project to run pipeline on.", + default=cfg.PROJECT_ID, + ) + + args, _ = parser.parse_known_args(args=argv) + return args + + +def run(): + """ + Runs the pipeline. It loads the training data, + assigns an unique ID to each document, converts it to a PubSub message, and + writes it to PubSub + """ + args = parse_arguments(sys.argv) + pipeline_options = get_pipeline_options( + job_name=cfg.JOB_NAME, + num_workers=cfg.NUM_WORKERS, + project=args.project, + mode=args.mode, + ) + train_categories = ["joy", "love", "fear"] + train_data, _ = get_dataset(train_categories) Review Comment: Should this be the test data rather than the train data? ########## sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/transformations.py: ########## @@ -0,0 +1,192 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""This file contains the transformations and utility functions for +the anomaly_detection pipeline.""" +import json + +import numpy as np + +import apache_beam as beam +import config as cfg +import hdbscan +import torch +import yagmail +from apache_beam.ml.inference.base import PredictionResult +from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy +from apache_beam.ml.inference.sklearn_inference import _validate_inference_args +from transformers import AutoTokenizer +from transformers import DistilBertModel + +# [START tokenization] +Tokenizer = AutoTokenizer.from_pretrained(cfg.TOKENIZER_NAME) + + +def tokenize_sentence(input_dict): + """ + It takes a dictionary with a text and an id, tokenizes the text, and + returns a tuple of the text and id and the tokenized text + + Args: + input_dict: a dictionary with the text and id of the sentence + + Returns: + A tuple of the text and id, and a dictionary of the tokens. 
+ """ + text, uid = input_dict["text"], input_dict["id"] + tokens = Tokenizer([text], padding=True, truncation=True, return_tensors="pt") + tokens = {key: torch.squeeze(val) for key, val in tokens.items()} + return (text, uid), tokens + + +# [END tokenization] + + +# [START DistilBertModelWrapper] +class ModelWrapper(DistilBertModel): + """Wrapper to DistilBertModel to get embeddings when calling + forward function.""" + def forward(self, **kwargs): + output = super().forward(**kwargs) + sentence_embedding = ( + self.mean_pooling(output, + kwargs["attention_mask"]).detach().cpu().numpy()) + return sentence_embedding + + # Mean Pooling - Take attention mask into account for correct averaging + def mean_pooling(self, model_output, attention_mask): + """ + The function calculates the mean of token embeddings + + Args: + model_output: The output of the model. + attention_mask: This is a tensor that contains 1s for all input tokens and + 0s for all padding tokens. + + Returns: + The mean of the token embeddings. + """ + token_embeddings = model_output[ + 0] # First element of model_output contains all token embeddings + input_mask_expanded = ( + attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()) + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( + input_mask_expanded.sum(1), min=1e-9) + + +# [END DistilBertModelWrapper] + + +# [START CustomSklearnModelHandlerNumpy] +class CustomSklearnModelHandlerNumpy(SklearnModelHandlerNumpy): + # Can be removed once: https://github.com/apache/beam/issues/21863 is fixed + def batch_elements_kwargs(self): + """Limit batch size to 1 for inference""" + return {"max_batch_size": 1} + + def run_inference(self, batch, model, inference_args=None): Review Comment: Can we be more specific about what will be removed , right now its not clear which part of this code will no longer be needed once 21863 and 22572 are fixed. -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected]. For queries about this service, please contact Infrastructure at: [email protected]
