eric-haibin-lin commented on a change in pull request #8180: Add wide and deep model into sparse example
URL: https://github.com/apache/incubator-mxnet/pull/8180#discussion_r150389300
 
 

 ##########
 File path: example/sparse/wide_deep_classification.py
 ##########
 @@ -0,0 +1,125 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import mxnet as mx
+from mxnet.test_utils import *
+from get_data import *
+from wide_deep_model import *
+import argparse
+import os
+
+
+parser = argparse.ArgumentParser(description="Run sparse wide and deep classification " \
+                                             "with distributed kvstore",
+                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument('--num-epoch', type=int, default=10,
+                    help='number of epochs to train')
+parser.add_argument('--batch-size', type=int, default=100,
+                    help='number of examples per batch')
+parser.add_argument('--lr', type=float, default=0.01,
+                    help='learning rate')
+parser.add_argument('--optimizer', type=str, default='adam',
+                    help='what optimizer to use',
+                    choices=["ftrl", "sgd", "adam"])
+parser.add_argument('--log-interval', type=int, default=100,
+                    help='number of batches to wait before logging training status')
+
+
+# Related to feature engineering, please see preprocess in get_data.py
+ADULT = {
+    'num_features': 2400,
+    'train': 'adult.data',
+    'test': 'adult.test',
+    'url': 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/',
+    'num_linear_features': 2000,
+    'num_embed_features': 2,
+    'num_cont_features': 38,
+    'embed_input_dims': [1000, 1000],
+    'hidden_units': [8, 50, 100],
+    'positive_class_weight': 2.0,
+}
+
+
+if __name__ == '__main__':
+    import logging
+    head = '%(asctime)-15s %(message)s'
+    logging.basicConfig(level=logging.INFO, format=head)
+
+    # arg parser
+    args = parser.parse_args()
+    logging.info(args)
+    num_epoch = args.num_epoch
+    batch_size = args.batch_size
+    optimizer = args.optimizer
+    log_interval = args.log_interval
+    lr = args.lr
+
+    # dataset    
+    num_features = ADULT['num_features']
+    data_dir = os.path.join(os.getcwd(), 'data')
+    train_data = os.path.join(data_dir, ADULT['train'])
+    val_data = os.path.join(data_dir, ADULT['test'])
+    train_csr, train_dns, train_label = get_uci_adult(data_dir, ADULT['train'], ADULT['url'])
+    val_csr, val_dns, val_label = get_uci_adult(data_dir, ADULT['test'], ADULT['url'])
+
+    model = wide_deep_model(ADULT['num_linear_features'], ADULT['num_embed_features'], ADULT['num_cont_features'],
+                            ADULT['embed_input_dims'], ADULT['hidden_units'], ADULT['positive_class_weight'])
+
+    # data iterator
+    train_data = mx.io.NDArrayIter({'csr_data': train_csr, 'dns_data': train_dns},
 
 Review comment:
   Adding a wrapper, i.e. train_data = mx.io.PrefetchingIter(train_data), will help hide I/O latency.
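   A minimal sketch of how the suggestion could be applied (the NDArrayIter arguments
   beyond the data dict are assumptions, since the hunk above is truncated;
   mx.io.PrefetchingIter simply wraps an existing DataIter):

       # build the iterator as in the example above (trailing arguments assumed)
       train_data = mx.io.NDArrayIter({'csr_data': train_csr, 'dns_data': train_dns},
                                      train_label, batch_size, shuffle=True)
       # wrap it so the next batch is prepared in a background thread,
       # overlapping data loading with computation
       train_data = mx.io.PrefetchingIter(train_data)

   The wrapped iterator exposes the same DataIter interface, so the rest of the
   training code (e.g. a Module fit call or a manual forward/backward loop)
   should need no changes.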

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
