This is an automated email from the ASF dual-hosted git repository.

ishan pushed a commit to branch ishan/upgrade-to-lucene-10
in repository https://gitbox.apache.org/repos/asf/solr.git

commit 3a33fa70095fa447b2e7eef20b68ac893e2365f0
Author: Ishan Chattopadhyaya <[email protected]>
AuthorDate: Thu Aug 7 19:07:46 2025 +0530

    SOLR-17631, SOLR-17839: Removing PreAnalyzed fields
---
 solr/CHANGES.txt                                   |   2 +
 .../apache/solr/schema/JsonPreAnalyzedParser.java  | 290 ----------
 .../org/apache/solr/schema/PreAnalyzedField.java   | 385 -------------
 .../solr/schema/SimplePreAnalyzedParser.java       | 612 ---------------------
 .../PreAnalyzedUpdateProcessorFactory.java         | 177 ------
 .../solr/collection1/conf/schema-preanalyzed.xml   |  44 --
 .../conf/solrconfig-update-processor-chains.xml    |  25 -
 .../conf/managed-schema.xml                        |  41 --
 .../cloud-managed-preanalyzed/conf/solrconfig.xml  |  51 --
 .../PreAnalyzedFieldManagedSchemaCloudTest.java    |  76 ---
 .../apache/solr/schema/PreAnalyzedFieldTest.java   | 278 ----------
 .../processor/PreAnalyzedUpdateProcessorTest.java  | 122 ----
 .../conf/managed-schema.xml                        |   8 -
 .../pages/update-request-processors.adoc           |   2 -
 .../modules/indexing-guide/indexing-nav.adoc       |   1 -
 .../pages/external-files-processes.adoc            | 251 ---------
 .../pages/field-types-included-with-solr.adoc      |   4 -
 .../pages/major-changes-in-solr-10.adoc            |   2 +
 18 files changed, 4 insertions(+), 2367 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c83920d339f..bd258d80b44 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -137,6 +137,8 @@ Deprecation Removals
 
 * SOLR-17779: Removed obsolete 'roles' and 'numShards' core properties, which were persisted with but unused (Pierre Salagnac).
 
+* SOLR-17839: Removed PreAnalyzedFields (Ishan Chattopadhyaya)
+
 Dependency Upgrades
 ---------------------
 
diff --git a/solr/core/src/java/org/apache/solr/schema/JsonPreAnalyzedParser.java b/solr/core/src/java/org/apache/solr/schema/JsonPreAnalyzedParser.java
deleted file mode 100644
index 1a9e9328625..00000000000
--- a/solr/core/src/java/org/apache/solr/schema/JsonPreAnalyzedParser.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.schema;
-
-import java.io.IOException;
-import java.io.Reader;
-import java.lang.invoke.MethodHandles;
-import java.nio.ByteBuffer;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Base64;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.Attribute;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.AttributeSource.State;
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.schema.PreAnalyzedField.ParseResult;
-import org.apache.solr.schema.PreAnalyzedField.PreAnalyzedParser;
-import org.noggit.JSONUtil;
-import org.noggit.ObjectBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class JsonPreAnalyzedParser implements PreAnalyzedParser {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String VERSION = "1";
-
-  public static final String VERSION_KEY = "v";
-  public static final String STRING_KEY = "str";
-  public static final String BINARY_KEY = "bin";
-  public static final String TOKENS_KEY = "tokens";
-  public static final String TOKEN_KEY = "t";
-  public static final String OFFSET_START_KEY = "s";
-  public static final String OFFSET_END_KEY = "e";
-  public static final String POSINCR_KEY = "i";
-  public static final String PAYLOAD_KEY = "p";
-  public static final String TYPE_KEY = "y";
-  public static final String FLAGS_KEY = "f";
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public ParseResult parse(Reader reader, AttributeSource parent) throws IOException {
-    ParseResult res = new ParseResult();
-    StringBuilder sb = new StringBuilder();
-    char[] buf = new char[128];
-    int cnt;
-    while ((cnt = reader.read(buf)) > 0) {
-      sb.append(buf, 0, cnt);
-    }
-    String val = sb.toString();
-    // empty string - accept even without version number
-    if (val.length() == 0) {
-      return res;
-    }
-    Object o = ObjectBuilder.fromJSONStrict(val);
-    if (!(o instanceof Map)) {
-      throw new IOException("Invalid JSON type " + o.getClass().getName() + ", 
expected Map");
-    }
-    Map<String, Object> map = (Map<String, Object>) o;
-    // check version
-    String version = (String) map.get(VERSION_KEY);
-    if (version == null) {
-      throw new IOException("Missing VERSION key");
-    }
-    if (!VERSION.equals(version)) {
-      throw new IOException("Unknown VERSION '" + version + "', expected " + 
VERSION);
-    }
-    if (map.containsKey(STRING_KEY) && map.containsKey(BINARY_KEY)) {
-      throw new IOException("Field cannot have both stringValue and 
binaryValue");
-    }
-    res.str = (String) map.get(STRING_KEY);
-    String bin = (String) map.get(BINARY_KEY);
-    if (bin != null) {
-      byte[] data = Base64.getDecoder().decode(bin);
-      res.bin = data;
-    }
-    List<Object> tokens = (List<Object>) map.get(TOKENS_KEY);
-    if (tokens == null) {
-      return res;
-    }
-    int tokenStart = 0;
-    int tokenEnd = 0;
-    parent.clearAttributes();
-    for (Object ot : tokens) {
-      tokenStart = tokenEnd + 1; // automatic increment by 1 separator
-      Map<String, Object> tok = (Map<String, Object>) ot;
-      boolean hasOffsetStart = false;
-      boolean hasOffsetEnd = false;
-      int len = -1;
-      for (Entry<String, Object> e : tok.entrySet()) {
-        String key = e.getKey();
-        if (key.equals(TOKEN_KEY)) {
-          CharTermAttribute catt = parent.addAttribute(CharTermAttribute.class);
-          String str = String.valueOf(e.getValue());
-          catt.append(str);
-          len = str.length();
-        } else if (key.equals(OFFSET_START_KEY)) {
-          Object obj = e.getValue();
-          hasOffsetStart = true;
-          if (obj instanceof Number) {
-            tokenStart = ((Number) obj).intValue();
-          } else {
-            try {
-              tokenStart = Integer.parseInt(String.valueOf(obj));
-            } catch (NumberFormatException nfe) {
-              log.warn("Invalid {} attribute, skipped: '{}'", 
OFFSET_START_KEY, obj);
-              hasOffsetStart = false;
-            }
-          }
-        } else if (key.equals(OFFSET_END_KEY)) {
-          hasOffsetEnd = true;
-          Object obj = e.getValue();
-          if (obj instanceof Number) {
-            tokenEnd = ((Number) obj).intValue();
-          } else {
-            try {
-              tokenEnd = Integer.parseInt(String.valueOf(obj));
-            } catch (NumberFormatException nfe) {
-              log.warn("Invalid {} attribute, skipped: '{}'", OFFSET_END_KEY, 
obj);
-              hasOffsetEnd = false;
-            }
-          }
-        } else if (key.equals(POSINCR_KEY)) {
-          Object obj = e.getValue();
-          int posIncr = 1;
-          if (obj instanceof Number) {
-            posIncr = ((Number) obj).intValue();
-          } else {
-            try {
-              posIncr = Integer.parseInt(String.valueOf(obj));
-            } catch (NumberFormatException nfe) {
-              log.warn("Invalid {} attribute, skipped: '{}'", POSINCR_KEY, 
obj);
-            }
-          }
-          PositionIncrementAttribute patt = parent.addAttribute(PositionIncrementAttribute.class);
-          patt.setPositionIncrement(posIncr);
-        } else if (key.equals(PAYLOAD_KEY)) {
-          String str = String.valueOf(e.getValue());
-          if (str.length() > 0) {
-            byte[] data = Base64.getDecoder().decode(str);
-            PayloadAttribute p = parent.addAttribute(PayloadAttribute.class);
-            if (data != null && data.length > 0) {
-              p.setPayload(new BytesRef(data));
-            }
-          }
-        } else if (key.equals(FLAGS_KEY)) {
-          try {
-            int f = Integer.parseInt(String.valueOf(e.getValue()), 16);
-            FlagsAttribute flags = parent.addAttribute(FlagsAttribute.class);
-            flags.setFlags(f);
-          } catch (NumberFormatException nfe) {
-            log.warn("Invalid {} attribute, skipped: '{}'", FLAGS_KEY, 
e.getValue());
-          }
-        } else if (key.equals(TYPE_KEY)) {
-          TypeAttribute tattr = parent.addAttribute(TypeAttribute.class);
-          tattr.setType(String.valueOf(e.getValue()));
-        } else {
-          log.warn("Unknown attribute, skipped: {} = {}", e.getKey(), 
e.getValue());
-        }
-      }
-      // handle offset attr
-      OffsetAttribute offset = parent.addAttribute(OffsetAttribute.class);
-      if (!hasOffsetEnd && len > -1) {
-        tokenEnd = tokenStart + len;
-      }
-      offset.setOffset(tokenStart, tokenEnd);
-      if (!hasOffsetStart) {
-        tokenStart = tokenEnd + 1;
-      }
-      // capture state and add to result
-      State state = parent.captureState();
-      res.states.add(state.clone());
-      // reset for reuse
-      parent.clearAttributes();
-    }
-    return res;
-  }
-
-  @Override
-  public String toFormattedString(Field f) throws IOException {
-    Map<String, Object> map = new LinkedHashMap<>();
-    map.put(VERSION_KEY, VERSION);
-    if (f.fieldType().stored()) {
-      String stringValue = f.stringValue();
-      if (stringValue != null) {
-        map.put(STRING_KEY, stringValue);
-      }
-      BytesRef binaryValue = f.binaryValue();
-      if (binaryValue != null) {
-        map.put(
-            BINARY_KEY,
-            new String(
-                Base64.getEncoder()
-                    .encode(
-                        ByteBuffer.wrap(binaryValue.bytes, binaryValue.offset, binaryValue.length))
-                    .array(),
-                StandardCharsets.ISO_8859_1));
-      }
-    }
-    TokenStream ts = f.tokenStreamValue();
-    if (ts != null) {
-      List<Map<String, Object>> tokens = new ArrayList<>();
-      while (ts.incrementToken()) {
-        Iterator<Class<? extends Attribute>> it = ts.getAttributeClassesIterator();
-        String cTerm = null;
-        String tTerm = null;
-        Map<String, Object> tok = new TreeMap<>();
-        while (it.hasNext()) {
-          Class<? extends Attribute> cl = it.next();
-          Attribute att = ts.getAttribute(cl);
-          if (att == null) {
-            continue;
-          }
-          if (cl.isAssignableFrom(CharTermAttribute.class)) {
-            CharTermAttribute catt = (CharTermAttribute) att;
-            cTerm = new String(catt.buffer(), 0, catt.length());
-          } else if (cl.isAssignableFrom(TermToBytesRefAttribute.class)) {
-            TermToBytesRefAttribute tatt = (TermToBytesRefAttribute) att;
-            tTerm = tatt.getBytesRef().utf8ToString();
-          } else {
-            if (cl.isAssignableFrom(FlagsAttribute.class)) {
-              tok.put(FLAGS_KEY, Integer.toHexString(((FlagsAttribute) att).getFlags()));
-            } else if (cl.isAssignableFrom(OffsetAttribute.class)) {
-              tok.put(OFFSET_START_KEY, ((OffsetAttribute) att).startOffset());
-              tok.put(OFFSET_END_KEY, ((OffsetAttribute) att).endOffset());
-            } else if (cl.isAssignableFrom(PayloadAttribute.class)) {
-              BytesRef p = ((PayloadAttribute) att).getPayload();
-              if (p != null && p.length > 0) {
-                tok.put(
-                    PAYLOAD_KEY,
-                    new String(
-                        Base64.getEncoder()
-                            .encode(ByteBuffer.wrap(p.bytes, p.offset, p.length))
-                            .array(),
-                        StandardCharsets.ISO_8859_1));
-              }
-            } else if (cl.isAssignableFrom(PositionIncrementAttribute.class)) {
-              tok.put(POSINCR_KEY, ((PositionIncrementAttribute) att).getPositionIncrement());
-            } else if (cl.isAssignableFrom(TypeAttribute.class)) {
-              tok.put(TYPE_KEY, ((TypeAttribute) att).type());
-            } else {
-              tok.put(cl.getName(), att.toString());
-            }
-          }
-        }
-        String term = null;
-        if (cTerm != null) {
-          term = cTerm;
-        } else {
-          term = tTerm;
-        }
-        if (term != null && term.length() > 0) {
-          tok.put(TOKEN_KEY, term);
-        }
-        tokens.add(tok);
-      }
-      map.put(TOKENS_KEY, tokens);
-    }
-    return JSONUtil.toJSON(map, -1);
-  }
-}
diff --git a/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java b/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java
deleted file mode 100644
index 9d1779f887f..00000000000
--- a/solr/core/src/java/org/apache/solr/schema/PreAnalyzedField.java
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.schema;
-
-import static org.apache.solr.common.params.CommonParams.JSON;
-
-import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
-import java.lang.invoke.MethodHandles;
-import java.lang.reflect.Constructor;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.SortedSetSelector;
-import org.apache.lucene.util.AttributeFactory;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.AttributeSource.State;
-import org.apache.solr.analysis.SolrAnalyzer;
-import org.apache.solr.response.TextResponseWriter;
-import org.apache.solr.search.QParser;
-import org.apache.solr.uninverting.UninvertingReader.Type;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Pre-analyzed field type provides a way to index a serialized token stream, optionally with an
- * independent stored value of a field.
- */
-public class PreAnalyzedField extends TextField implements HasImplicitIndexAnalyzer {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  /**
-   * Init argument name. Value is a fully-qualified class name of the parser that implements {@link
-   * PreAnalyzedParser}.
-   */
-  public static final String PARSER_IMPL = "parserImpl";
-
-  private static final String DEFAULT_IMPL = JsonPreAnalyzedParser.class.getName();
-
-  private PreAnalyzedParser parser;
-  private PreAnalyzedAnalyzer preAnalyzer;
-
-  @Override
-  public void init(IndexSchema schema, Map<String, String> args) {
-    super.init(schema, args);
-    String implName = args.get(PARSER_IMPL);
-    if (implName == null) {
-      parser = new JsonPreAnalyzedParser();
-    } else {
-      // short name
-      if (JSON.equalsIgnoreCase(implName)) {
-        parser = new JsonPreAnalyzedParser();
-      } else if ("simple".equalsIgnoreCase(implName)) {
-        parser = new SimplePreAnalyzedParser();
-      } else {
-        try {
-          Class<? extends PreAnalyzedParser> implClazz =
-              schema.getSolrClassLoader().findClass(implName, PreAnalyzedParser.class);
-          Constructor<?> c = implClazz.getConstructor(new Class<?>[0]);
-          parser = (PreAnalyzedParser) c.newInstance(new Object[0]);
-        } catch (Exception e) {
-          log.warn(
-              "Can't use the configured PreAnalyzedParser class '{}', using 
defualt {}",
-              implName,
-              DEFAULT_IMPL,
-              e);
-          parser = new JsonPreAnalyzedParser();
-        }
-      }
-      args.remove(PARSER_IMPL);
-    }
-    // create Analyzer instance for reuse:
-    preAnalyzer = new PreAnalyzedAnalyzer(parser);
-  }
-
-  /**
-   * Overridden to return an analyzer consisting of a {@link PreAnalyzedTokenizer}. NOTE: If an
-   * index analyzer is specified in the schema, it will be ignored.
-   */
-  @Override
-  public Analyzer getIndexAnalyzer() {
-    return preAnalyzer;
-  }
-
-  /**
-   * Returns the query analyzer defined via the schema, unless there is none, in which case the
-   * index-time pre-analyzer is returned.
-   *
-   * <p>Note that if the schema specifies an index-time analyzer via either {@code <analyzer>} or
-   * {@code <analyzer type="index">}, but no query-time analyzer, the query analyzer returned here
-   * will be the index-time analyzer specified in the schema rather than the pre-analyzer.
-   */
-  @Override
-  public Analyzer getQueryAnalyzer() {
-    Analyzer queryAnalyzer = super.getQueryAnalyzer();
-    return queryAnalyzer instanceof FieldType.DefaultAnalyzer ? getIndexAnalyzer() : queryAnalyzer;
-  }
-
-  @Override
-  public IndexableField createField(SchemaField field, Object value) {
-    IndexableField f = null;
-    try {
-      f = fromString(field, String.valueOf(value));
-    } catch (Exception e) {
-      log.warn("Error parsing pre-analyzed field '{}'", field.getName(), e);
-      return null;
-    }
-    return f;
-  }
-
-  @Override
-  public SortField getSortField(SchemaField field, boolean top) {
-    return getSortedSetSortField(
-        field, SortedSetSelector.Type.MIN, top, SortField.STRING_FIRST, SortField.STRING_LAST);
-  }
-
-  @Override
-  public ValueSource getValueSource(SchemaField field, QParser parser) {
-    return new SortedSetFieldSource(field.getName());
-  }
-
-  @Override
-  public Type getUninversionType(SchemaField sf) {
-    return Type.SORTED_SET_BINARY;
-  }
-
-  @Override
-  public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
-    writer.writeStr(name, toExternal(f), true);
-  }
-
-  /**
-   * Utility method to convert a field to a string that is parse-able by this class.
-   *
-   * @param f field to convert
-   * @return string that is compatible with the serialization format
-   * @throws IOException If there is a low-level I/O error.
-   */
-  public String toFormattedString(Field f) throws IOException {
-    return parser.toFormattedString(f);
-  }
-
-  /**
-   * Utility method to create a {@link org.apache.lucene.document.FieldType} based on the {@link
-   * SchemaField}
-   */
-  public static org.apache.lucene.document.FieldType createFieldType(SchemaField field) {
-    if (!field.indexed() && !field.stored()) {
-      log.trace("Ignoring unindexed/unstored field: {}", field);
-      return null;
-    }
-    org.apache.lucene.document.FieldType newType = new org.apache.lucene.document.FieldType();
-    newType.setTokenized(field.isTokenized());
-    newType.setStored(field.stored());
-    newType.setOmitNorms(field.omitNorms());
-    IndexOptions options = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
-    if (field.omitTermFreqAndPositions()) {
-      options = IndexOptions.DOCS;
-    } else if (field.omitPositions()) {
-      options = IndexOptions.DOCS_AND_FREQS;
-    } else if (field.storeOffsetsWithPositions()) {
-      options = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
-    }
-    newType.setIndexOptions(options);
-    newType.setStoreTermVectors(field.storeTermVector());
-    newType.setStoreTermVectorOffsets(field.storeTermOffsets());
-    newType.setStoreTermVectorPositions(field.storeTermPositions());
-    newType.setStoreTermVectorPayloads(field.storeTermPayloads());
-    return newType;
-  }
-
-  /** This is a simple holder of a stored part and the collected states (tokens with attributes). */
-  public static class ParseResult {
-    public String str;
-    public byte[] bin;
-    public List<State> states = new ArrayList<>();
-  }
-
-  /** Parse the input and return the stored part and the tokens with attributes. */
-  public static interface PreAnalyzedParser {
-    /**
-     * Parse input.
-     *
-     * @param reader input to read from
-     * @param parent parent who will own the resulting states (tokens with attributes)
-     * @return parse result, with possibly null stored and/or states fields.
-     * @throws IOException if a parsing error or IO error occurs
-     */
-    public ParseResult parse(Reader reader, AttributeSource parent) throws IOException;
-
-    /**
-     * Format a field so that the resulting String is valid for parsing with {@link #parse(Reader,
-     * AttributeSource)}.
-     *
-     * @param f field instance
-     * @return formatted string
-     * @throws IOException If there is a low-level I/O error.
-     */
-    public String toFormattedString(Field f) throws IOException;
-  }
-
-  public IndexableField fromString(SchemaField field, String val) throws Exception {
-    if (val == null || val.trim().length() == 0) {
-      return null;
-    }
-    PreAnalyzedTokenizer parse = new PreAnalyzedTokenizer(parser);
-    Reader reader = new StringReader(val);
-    parse.setReader(reader);
-    parse.decodeInput(reader); // consume
-    parse.reset();
-    org.apache.lucene.document.FieldType type = createFieldType(field);
-    if (type == null) {
-      parse.close();
-      return null;
-    }
-    Field f = null;
-    if (parse.getStringValue() != null) {
-      if (field.stored()) {
-        f = new Field(field.getName(), parse.getStringValue(), type);
-      } else {
-        type.setStored(false);
-      }
-    } else if (parse.getBinaryValue() != null) {
-      if (field.isBinary()) {
-        f = new Field(field.getName(), parse.getBinaryValue(), type);
-      }
-    } else {
-      type.setStored(false);
-    }
-
-    if (parse.hasTokenStream()) {
-      if (field.indexed()) {
-        type.setTokenized(true);
-        if (f != null) {
-          f.setTokenStream(parse);
-        } else {
-          f = new Field(field.getName(), parse, type);
-        }
-      } else {
-        if (f != null) {
-          type.setIndexOptions(IndexOptions.NONE);
-          type.setTokenized(false);
-        }
-      }
-    }
-    return f;
-  }
-
-  /** Token stream that works from a list of saved states. */
-  private static class PreAnalyzedTokenizer extends Tokenizer {
-    private final List<AttributeSource.State> cachedStates = new ArrayList<>();
-    private Iterator<AttributeSource.State> it = null;
-    private String stringValue = null;
-    private byte[] binaryValue = null;
-    private PreAnalyzedParser parser;
-    private IOException readerConsumptionException;
-    private int lastEndOffset;
-
-    public PreAnalyzedTokenizer(PreAnalyzedParser parser) {
-      // we don't pack attributes: since we are used for (de)serialization and dont want bloat.
-      super(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
-      this.parser = parser;
-    }
-
-    public boolean hasTokenStream() {
-      return !cachedStates.isEmpty();
-    }
-
-    public String getStringValue() {
-      return stringValue;
-    }
-
-    public byte[] getBinaryValue() {
-      return binaryValue;
-    }
-
-    @Override
-    public final boolean incrementToken() {
-      if (!it.hasNext()) {
-        return false;
-      }
-
-      AttributeSource.State state = it.next();
-      restoreState(state.clone());
-      // TODO: why can't I lookup the OffsetAttribute up in ctor instead?
-      lastEndOffset = addAttribute(OffsetAttribute.class).endOffset();
-      return true;
-    }
-
-    /**
-     * Throws a delayed exception if one was thrown from decodeInput() while reading from the input
-     * reader.
-     */
-    @Override
-    public final void reset() throws IOException {
-      super.reset();
-      if (readerConsumptionException != null) {
-        IOException e = new IOException(readerConsumptionException);
-        readerConsumptionException = null;
-        throw e;
-      }
-      it = cachedStates.iterator();
-    }
-
-    @Override
-    public void end() throws IOException {
-      super.end();
-      // we must set the end offset correctly so multi-valued fields don't try to send offsets
-      // backwards:
-      addAttribute(OffsetAttribute.class).setOffset(lastEndOffset, lastEndOffset);
-    }
-
-    private void setReaderConsumptionException(IOException e) {
-      readerConsumptionException = e;
-    }
-
-    /** Parses the input reader and adds attributes specified there. */
-    private void decodeInput(Reader reader) throws IOException {
-      removeAllAttributes(); // reset attributes to the empty set
-      cachedStates.clear();
-      stringValue = null;
-      binaryValue = null;
-      try {
-        ParseResult res = parser.parse(reader, this);
-        if (res != null) {
-          stringValue = res.str;
-          binaryValue = res.bin;
-          if (res.states != null) {
-            cachedStates.addAll(res.states);
-          }
-        }
-      } catch (IOException e) {
-        removeAllAttributes(); // reset attributes to the empty set
-        throw e; // rethrow
-      }
-    }
-  }
-
-  private static class PreAnalyzedAnalyzer extends SolrAnalyzer {
-    private PreAnalyzedParser parser;
-
-    PreAnalyzedAnalyzer(PreAnalyzedParser parser) {
-      this.parser = parser;
-    }
-
-    @Override
-    protected TokenStreamComponents createComponents(String fieldName) {
-      final PreAnalyzedTokenizer tokenizer = new PreAnalyzedTokenizer(parser);
-      return new TokenStreamComponents(
-          r -> {
-            try {
-              tokenizer.decodeInput(r);
-            } catch (IOException e) {
-              tokenizer.setReaderConsumptionException(e);
-            }
-          },
-          tokenizer);
-    }
-  }
-}
diff --git a/solr/core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java b/solr/core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java
deleted file mode 100644
index 79f856d3c77..00000000000
--- a/solr/core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java
+++ /dev/null
@@ -1,612 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.schema;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.Reader;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.Attribute;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.AttributeSource.State;
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.schema.PreAnalyzedField.ParseResult;
-import org.apache.solr.schema.PreAnalyzedField.PreAnalyzedParser;
-
-/**
- * Simple plain text format parser for {@link PreAnalyzedField}.
- *
- * <h2>Serialization format</h2>
- *
- * <p>The format of the serialization is as follows:
- *
- * <pre>
- * content ::= version (stored)? tokens
- * version ::= digit+ " "
- * ; stored field value - any "=" inside must be escaped!
- * stored ::= "=" text "="
- * tokens ::= (token ((" ") + token)*)*
- * token ::= text ("," attrib)*
- * attrib ::= name '=' value
- * name ::= text
- * value ::= text
- * </pre>
- *
- * <p>Special characters in "text" values can be escaped using the escape character \ . The
- * following escape sequences are recognized:
- *
- * <pre>
- * "\ " - literal space character
- * "\," - literal , character
- * "\=" - literal = character
- * "\\" - literal \ character
- * "\n" - newline
- * "\r" - carriage return
- * "\t" - horizontal tab
- * </pre>
- *
- * Please note that Unicode sequences (e.g. &#92;u0001) are not supported.
- *
- * <h2>Supported attribute names</h2>
- *
- * The following token attributes are supported, and identified with short symbolic names:
- *
- * <pre>
- * i - position increment (integer)
- * s - token offset, start position (integer)
- * e - token offset, end position (integer)
- * t - token type (string)
- * f - token flags (hexadecimal integer)
- * p - payload (bytes in hexadecimal format; whitespace is ignored)
- * </pre>
- *
- * Token offsets are tracked and implicitly added to the token stream - the start and end offsets
- * consider only the term text and whitespace, and exclude the space taken by token attributes.
- *
- * <h2>Example token streams</h2>
- *
- * <pre>
- * 1 one two three
- * - version 1
- * - stored: 'null'
- * - tok: '(term=one,startOffset=0,endOffset=3)'
- * - tok: '(term=two,startOffset=4,endOffset=7)'
- * - tok: '(term=three,startOffset=8,endOffset=13)'
- * 1 one  two   three
- * - version 1
- * - stored: 'null'
- * - tok: '(term=one,startOffset=0,endOffset=3)'
- * - tok: '(term=two,startOffset=5,endOffset=8)'
- * - tok: '(term=three,startOffset=11,endOffset=16)'
- * 1 one,s=123,e=128,i=22  two three,s=20,e=22
- * - version 1
- * - stored: 'null'
- * - tok: '(term=one,positionIncrement=22,startOffset=123,endOffset=128)'
- * - tok: '(term=two,positionIncrement=1,startOffset=5,endOffset=8)'
- * - tok: '(term=three,positionIncrement=1,startOffset=20,endOffset=22)'
- * 1 \ one\ \,,i=22,a=\, two\=
- *
- * \n,\ =\   \
- * - version 1
- * - stored: 'null'
- * - tok: '(term= one ,,positionIncrement=22,startOffset=0,endOffset=6)'
- * - tok: '(term=two=
- *
- *
- * ,positionIncrement=1,startOffset=7,endOffset=15)'
- * - tok: '(term=\,positionIncrement=1,startOffset=17,endOffset=18)'
- * 1 ,i=22 ,i=33,s=2,e=20 ,
- * - version 1
- * - stored: 'null'
- * - tok: '(term=,positionIncrement=22,startOffset=0,endOffset=0)'
- * - tok: '(term=,positionIncrement=33,startOffset=2,endOffset=20)'
- * - tok: '(term=,positionIncrement=1,startOffset=2,endOffset=2)'
- * 1 =This is the stored part with \=
- * \n    \t escapes.=one two three
- * - version 1
- * - stored: 'This is the stored part with =
- * \n    \t escapes.'
- * - tok: '(term=one,startOffset=0,endOffset=3)'
- * - tok: '(term=two,startOffset=4,endOffset=7)'
- * - tok: '(term=three,startOffset=8,endOffset=13)'
- * 1 ==
- * - version 1
- * - stored: ''
- * - (no tokens)
- * 1 =this is a test.=
- * - version 1
- * - stored: 'this is a test.'
- * - (no tokens)
- * </pre>
- */
-public final class SimplePreAnalyzedParser implements PreAnalyzedParser {
-  static final String VERSION = "1";
-
-  private static class Tok {
-    StringBuilder token = new StringBuilder();
-    Map<String, String> attr = new HashMap<>();
-
-    public boolean isEmpty() {
-      return token.length() == 0 && attr.size() == 0;
-    }
-
-    public void reset() {
-      token.setLength(0);
-      attr.clear();
-    }
-
-    @Override
-    public String toString() {
-      return "tok='" + token + "',attr=" + attr;
-    }
-  }
-
-  // parser state
-  private static enum S {
-    TOKEN,
-    NAME,
-    VALUE,
-    UNDEF
-  };
-
-  private static final byte[] EMPTY_BYTES = new byte[0];
-
-  /** Utility method to convert a hex string to a byte array. */
-  static byte[] hexToBytes(String hex) {
-    if (hex == null) {
-      return EMPTY_BYTES;
-    }
-    hex = hex.replaceAll("\\s+", "");
-    if (hex.length() == 0) {
-      return EMPTY_BYTES;
-    }
-    ByteArrayOutputStream baos = new ByteArrayOutputStream(hex.length() / 2);
-    byte b;
-    for (int i = 0; i < hex.length(); i++) {
-      int high = charToNibble(hex.charAt(i));
-      int low = 0;
-      if (i < hex.length() - 1) {
-        i++;
-        low = charToNibble(hex.charAt(i));
-      }
-      b = (byte) (high << 4 | low);
-      baos.write(b);
-    }
-    return baos.toByteArray();
-  }
-
-  static final int charToNibble(char c) {
-    if (c >= '0' && c <= '9') {
-      return c - '0';
-    } else if (c >= 'a' && c <= 'f') {
-      return 0xa + (c - 'a');
-    } else if (c >= 'A' && c <= 'F') {
-      return 0xA + (c - 'A');
-    } else {
-      throw new RuntimeException("Not a hex character: '" + c + "'");
-    }
-  }
-
-  static String bytesToHex(byte bytes[], int offset, int length) {
-    StringBuilder sb = new StringBuilder();
-    for (int i = offset; i < offset + length; ++i) {
-      sb.append(Integer.toHexString(0x0100 + (bytes[i] & 0x00FF)).substring(1));
-    }
-    return sb.toString();
-  }
-
-  public SimplePreAnalyzedParser() {}
-
-  @Override
-  public ParseResult parse(Reader reader, AttributeSource parent) throws IOException {
-    ParseResult res = new ParseResult();
-    StringBuilder sb = new StringBuilder();
-    char[] buf = new char[128];
-    int cnt;
-    while ((cnt = reader.read(buf)) > 0) {
-      sb.append(buf, 0, cnt);
-    }
-    String val = sb.toString();
-    // empty string - accept even without version number
-    if (val.length() == 0) {
-      return res;
-    }
-    // first consume the version
-    int idx = val.indexOf(' ');
-    if (idx == -1) {
-      throw new IOException("Missing VERSION token");
-    }
-    String version = val.substring(0, idx);
-    if (!VERSION.equals(version)) {
-      throw new IOException("Unknown VERSION " + version);
-    }
-    val = val.substring(idx + 1);
-    // then consume the optional stored part
-    int tsStart = 0;
-    boolean hasStored = false;
-    StringBuilder storedBuf = new StringBuilder();
-    if (val.charAt(0) == '=') {
-      hasStored = true;
-      if (val.length() > 1) {
-        for (int i = 1; i < val.length(); i++) {
-          char c = val.charAt(i);
-          if (c == '\\') {
-            if (i < val.length() - 1) {
-              c = val.charAt(++i);
-              if (c == '=') { // we recognize only \= escape in the stored part
-                storedBuf.append('=');
-              } else {
-                storedBuf.append('\\');
-                storedBuf.append(c);
-                continue;
-              }
-            } else {
-              storedBuf.append(c);
-              continue;
-            }
-          } else if (c == '=') {
-            // end of stored text
-            tsStart = i + 1;
-            break;
-          } else {
-            storedBuf.append(c);
-          }
-        }
-        if (tsStart == 0) { // missing end-of-stored marker
-          throw new IOException("Missing end marker of stored part");
-        }
-      } else {
-        throw new IOException("Unexpected end of stored field");
-      }
-    }
-    if (hasStored) {
-      res.str = storedBuf.toString();
-    }
-    Tok tok = new Tok();
-    StringBuilder attName = new StringBuilder();
-    StringBuilder attVal = new StringBuilder();
-    // parser state
-    S s = S.UNDEF;
-    int lastPos = 0;
-    for (int i = tsStart; i < val.length(); i++) {
-      char c = val.charAt(i);
-      if (c == ' ') {
-        // collect leftovers
-        switch (s) {
-          case VALUE:
-            if (attVal.length() == 0) {
-              throw new IOException(
-                  "Unexpected character '"
-                      + c
-                      + "' at position "
-                      + i
-                      + " - empty value of attribute.");
-            }
-            if (attName.length() > 0) {
-              tok.attr.put(attName.toString(), attVal.toString());
-            }
-            break;
-          case NAME: // attr name without a value ?
-            if (attName.length() > 0) {
-              throw new IOException(
-                  "Unexpected character '"
-                      + c
-                      + "' at position "
-                      + i
-                      + " - missing attribute value.");
-            } else {
-              // accept missing att name and value
-            }
-            break;
-          case TOKEN:
-          case UNDEF:
-            // do nothing, advance to next token
-        }
-        attName.setLength(0);
-        attVal.setLength(0);
-        if (!tok.isEmpty() || s == S.NAME) {
-          AttributeSource.State state = createState(parent, tok, lastPos);
-          if (state != null) res.states.add(state.clone());
-        }
-        // reset tok
-        s = S.UNDEF;
-        tok.reset();
-        // skip
-        lastPos++;
-        continue;
-      }
-      StringBuilder tgt = null;
-      switch (s) {
-        case TOKEN:
-          tgt = tok.token;
-          break;
-        case NAME:
-          tgt = attName;
-          break;
-        case VALUE:
-          tgt = attVal;
-          break;
-        case UNDEF:
-          tgt = tok.token;
-          s = S.TOKEN;
-      }
-      if (c == '\\') {
-        if (s == S.TOKEN) lastPos++;
-        if (i >= val.length() - 1) { // end
-
-          tgt.append(c);
-          continue;
-        } else {
-          c = val.charAt(++i);
-          switch (c) {
-            case '\\':
-            case '=':
-            case ',':
-            case ' ':
-              tgt.append(c);
-              break;
-            case 'n':
-              tgt.append('\n');
-              break;
-            case 'r':
-              tgt.append('\r');
-              break;
-            case 't':
-              tgt.append('\t');
-              break;
-            default:
-              tgt.append('\\');
-              tgt.append(c);
-              lastPos++;
-          }
-        }
-      } else {
-        // state switch
-        if (c == ',') {
-          if (s == S.TOKEN) {
-            s = S.NAME;
-          } else if (s == S.VALUE) { // end of value, start of next attr
-            if (attVal.length() == 0) {
-              throw new IOException(
-                  "Unexpected character '"
-                      + c
-                      + "' at position "
-                      + i
-                      + " - empty value of attribute.");
-            }
-            if (attName.length() > 0 && attVal.length() > 0) {
-              tok.attr.put(attName.toString(), attVal.toString());
-            }
-            // reset
-            attName.setLength(0);
-            attVal.setLength(0);
-            s = S.NAME;
-          } else {
-            throw new IOException(
-                "Unexpected character '"
-                    + c
-                    + "' at position "
-                    + i
-                    + " - missing attribute value.");
-          }
-        } else if (c == '=') {
-          if (s == S.NAME) {
-            s = S.VALUE;
-          } else {
-            throw new IOException(
-                "Unexpected character '"
-                    + c
-                    + "' at position "
-                    + i
-                    + " - empty value of attribute.");
-          }
-        } else {
-          tgt.append(c);
-          if (s == S.TOKEN) lastPos++;
-        }
-      }
-    }
-    // collect leftovers
-    if (!tok.isEmpty() || s == S.NAME || s == S.VALUE) {
-      // remaining attrib?
-      if (s == S.VALUE) {
-        if (attName.length() > 0 && attVal.length() > 0) {
-          tok.attr.put(attName.toString(), attVal.toString());
-        }
-      }
-      AttributeSource.State state = createState(parent, tok, lastPos);
-      if (state != null) res.states.add(state.clone());
-    }
-    return res;
-  }
-
-  private static AttributeSource.State createState(AttributeSource a, Tok state, int tokenEnd) {
-    a.clearAttributes();
-    CharTermAttribute termAtt = a.addAttribute(CharTermAttribute.class);
-    char[] tokChars = state.token.toString().toCharArray();
-    termAtt.copyBuffer(tokChars, 0, tokChars.length);
-    int tokenStart = tokenEnd - state.token.length();
-    for (Entry<String, String> e : state.attr.entrySet()) {
-      String k = e.getKey();
-      if (k.equals("i")) {
-        // position increment
-        int incr = Integer.parseInt(e.getValue());
-        PositionIncrementAttribute posIncr = a.addAttribute(PositionIncrementAttribute.class);
-        posIncr.setPositionIncrement(incr);
-      } else if (k.equals("s")) {
-        tokenStart = Integer.parseInt(e.getValue());
-      } else if (k.equals("e")) {
-        tokenEnd = Integer.parseInt(e.getValue());
-      } else if (k.equals("y")) {
-        TypeAttribute type = a.addAttribute(TypeAttribute.class);
-        type.setType(e.getValue());
-      } else if (k.equals("f")) {
-        FlagsAttribute flags = a.addAttribute(FlagsAttribute.class);
-        int f = Integer.parseInt(e.getValue(), 16);
-        flags.setFlags(f);
-      } else if (k.equals("p")) {
-        PayloadAttribute p = a.addAttribute(PayloadAttribute.class);
-        byte[] data = hexToBytes(e.getValue());
-        if (data != null && data.length > 0) {
-          p.setPayload(new BytesRef(data));
-        }
-      } else {
-        // unknown attribute
-      }
-    }
-    // handle offset attr
-    OffsetAttribute offset = a.addAttribute(OffsetAttribute.class);
-    offset.setOffset(tokenStart, tokenEnd);
-    State resState = a.captureState();
-    a.clearAttributes();
-    return resState;
-  }
-
-  @Override
-  public String toFormattedString(Field f) throws IOException {
-    StringBuilder sb = new StringBuilder();
-    sb.append(VERSION + " ");
-    if (f.fieldType().stored()) {
-      String s = f.stringValue();
-      if (s != null) {
-        // encode the equals sign
-        sb.append('=');
-        sb.append(s);
-        sb.append('=');
-      }
-    }
-    TokenStream ts = f.tokenStreamValue();
-    if (ts != null) {
-      StringBuilder tok = new StringBuilder();
-      boolean next = false;
-      while (ts.incrementToken()) {
-        if (next) {
-          sb.append(' ');
-        } else {
-          next = true;
-        }
-        tok.setLength(0);
-        Iterator<Class<? extends Attribute>> it = ts.getAttributeClassesIterator();
-        String cTerm = null;
-        String tTerm = null;
-        while (it.hasNext()) {
-          Class<? extends Attribute> cl = it.next();
-          Attribute att = ts.getAttribute(cl);
-          if (att == null) {
-            continue;
-          }
-          if (cl.isAssignableFrom(CharTermAttribute.class)) {
-            CharTermAttribute catt = (CharTermAttribute) att;
-            cTerm = escape(catt.buffer(), catt.length());
-          } else if (cl.isAssignableFrom(TermToBytesRefAttribute.class)) {
-            TermToBytesRefAttribute tatt = (TermToBytesRefAttribute) att;
-            char[] tTermChars = tatt.getBytesRef().utf8ToString().toCharArray();
-            tTerm = escape(tTermChars, tTermChars.length);
-          } else {
-            if (tok.length() > 0) tok.append(',');
-            if (cl.isAssignableFrom(FlagsAttribute.class)) {
-              tok.append("f=").append(Integer.toHexString(((FlagsAttribute) 
att).getFlags()));
-            } else if (cl.isAssignableFrom(OffsetAttribute.class)) {
-              tok.append("s=")
-                  .append(((OffsetAttribute) att).startOffset())
-                  .append(",e=")
-                  .append(((OffsetAttribute) att).endOffset());
-            } else if (cl.isAssignableFrom(PayloadAttribute.class)) {
-              BytesRef p = ((PayloadAttribute) att).getPayload();
-              if (p != null && p.length > 0) {
-                tok.append("p=").append(bytesToHex(p.bytes, p.offset, 
p.length));
-              } else if (tok.length() > 0) {
-                tok.setLength(tok.length() - 1); // remove the last comma
-              }
-            } else if (cl.isAssignableFrom(PositionIncrementAttribute.class)) {
-              tok.append("i=").append(((PositionIncrementAttribute) 
att).getPositionIncrement());
-            } else if (cl.isAssignableFrom(TypeAttribute.class)) {
-              tok.append("y=").append(escape(((TypeAttribute) att).type()));
-            } else {
-
-              
tok.append(cl.getName()).append('=').append(escape(att.toString()));
-            }
-          }
-        }
-        String term = null;
-        if (cTerm != null) {
-          term = cTerm;
-        } else {
-          term = tTerm;
-        }
-        if (term != null && term.length() > 0) {
-          if (tok.length() > 0) {
-            tok.insert(0, term + ",");
-          } else {
-            tok.insert(0, term);
-          }
-        }
-        sb.append(tok);
-      }
-    }
-    return sb.toString();
-  }
-
-  String escape(String val) {
-    return escape(val.toCharArray(), val.length());
-  }
-
-  String escape(char[] val, int len) {
-    if (val == null || len == 0) {
-      return "";
-    }
-    StringBuilder sb = new StringBuilder();
-    for (int i = 0; i < len; i++) {
-      switch (val[i]) {
-        case '\\':
-        case '=':
-        case ',':
-        case ' ':
-          sb.append('\\');
-          sb.append(val[i]);
-          break;
-        case '\n':
-          sb.append('\\');
-          sb.append('n');
-          break;
-        case '\r':
-          sb.append('\\');
-          sb.append('r');
-          break;
-        case '\t':
-          sb.append('\\');
-          sb.append('t');
-          break;
-        default:
-          sb.append(val[i]);
-      }
-    }
-    return sb.toString();
-  }
-}
diff --git a/solr/core/src/java/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorFactory.java
deleted file mode 100644
index f5ed21b2f73..00000000000
--- a/solr/core/src/java/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorFactory.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.update.processor;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.index.IndexableField;
-import org.apache.solr.common.SolrInputField;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.JsonPreAnalyzedParser;
-import org.apache.solr.schema.PreAnalyzedField;
-import org.apache.solr.schema.PreAnalyzedField.PreAnalyzedParser;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.schema.SimplePreAnalyzedParser;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * An update processor that parses configured fields of any document being added using {@link
- * PreAnalyzedField} with the configured format parser.
- *
- * <p>Fields are specified using the same patterns as in {@link
- * FieldMutatingUpdateProcessorFactory}. They are then checked whether they follow a pre-analyzed
- * format defined by <code>parser</code>. Valid fields are then parsed. The original {@link
- * SchemaField} is used for the initial creation of {@link IndexableField}, which is then modified
- * to add the results from parsing (token stream value and/or string value) and then it will be
- * directly added to the final Lucene {@link Document} to be indexed.
- *
- * <p>Fields that are declared in the patterns list but are not present in the current schema will
- * be removed from the input document.
- *
- * <h2>Implementation details</h2>
- *
- * <p>This update processor uses {@link PreAnalyzedParser} to parse the original field content
- * (interpreted as a string value), and thus obtain the stored part and the token stream part. Then
- * it creates the "template" {@link Field}-s using the original {@link
- * SchemaField#createFields(Object)} as declared in the current schema. Finally it sets the
- * pre-analyzed parts if available (string value and the token stream value) on the first field of
- * these "template" fields. If the declared field type does not support stored or indexed parts then
- * such parts are silently discarded. Finally the updated "template" {@link Field}-s are added to
- * the resulting {@link SolrInputField}, and the original value of that field is removed.
- *
- * <h2>Example configuration</h2>
- *
- * <p>In the example configuration below there are two update chains, one that uses the "simple"
- * parser ({@link SimplePreAnalyzedParser}) and one that uses the "json" parser ({@link
- * JsonPreAnalyzedParser}). Field "nonexistent" will be removed from input documents if not present
- * in the schema. Other fields will be analyzed and if valid they will be converted to {@link
- * IndexableField}-s or if they are not in a valid format that can be parsed with the selected
- * parser they will be passed as-is. Assuming that <code>ssto</code> field is stored but not
- * indexed, and <code>sind</code> field is indexed but not stored: if <code>ssto</code> input value
- * contains the indexed part then this part will be discarded and only the stored value part will be
- * retained. Similarly, if <code>sind</code> input value contains the stored part then it will be
- * discarded and only the token stream part will be retained.
- *
- * <pre class="prettyprint">
- *   &lt;updateRequestProcessorChain name="pre-analyzed-simple"&gt;
- *    &lt;processor class="solr.PreAnalyzedUpdateProcessorFactory"&gt;
- *      &lt;str name="fieldName"&gt;title&lt;/str&gt;
- *      &lt;str name="fieldName"&gt;nonexistent&lt;/str&gt;
- *      &lt;str name="fieldName"&gt;ssto&lt;/str&gt;
- *      &lt;str name="fieldName"&gt;sind&lt;/str&gt;
- *      &lt;str name="parser"&gt;simple&lt;/str&gt;
- *    &lt;/processor&gt;
- *    &lt;processor class="solr.RunUpdateProcessorFactory" /&gt;
- *  &lt;/updateRequestProcessorChain&gt;
- *
- *  &lt;updateRequestProcessorChain name="pre-analyzed-json"&gt;
- *    &lt;processor class="solr.PreAnalyzedUpdateProcessorFactory"&gt;
- *      &lt;str name="fieldName"&gt;title&lt;/str&gt;
- *      &lt;str name="fieldName"&gt;nonexistent&lt;/str&gt;
- *      &lt;str name="fieldName"&gt;ssto&lt;/str&gt;
- *      &lt;str name="fieldName"&gt;sind&lt;/str&gt;
- *      &lt;str name="parser"&gt;json&lt;/str&gt;
- *    &lt;/processor&gt;
- *    &lt;processor class="solr.RunUpdateProcessorFactory" /&gt;
- *  &lt;/updateRequestProcessorChain&gt;
- *  </pre>
- *
- * @since 4.3.0
- */
-public class PreAnalyzedUpdateProcessorFactory extends FieldMutatingUpdateProcessorFactory {
-
-  private PreAnalyzedField parser;
-  private String parserImpl;
-
-  @Override
-  public void init(final NamedList<?> args) {
-    parserImpl = (String) args.get("parser");
-    args.remove("parser");
-    // initialize inclusion / exclusion patterns
-    super.init(args);
-  }
-
-  @Override
-  public UpdateRequestProcessor getInstance(
-      SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) {
-    return new PreAnalyzedUpdateProcessor(getSelector(), next, req.getSchema(), parser);
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    super.inform(core);
-    parser = new PreAnalyzedField();
-    Map<String, String> args = new HashMap<>();
-    if (parserImpl != null) {
-      args.put(PreAnalyzedField.PARSER_IMPL, parserImpl);
-    }
-    parser.init(core.getLatestSchema(), args);
-  }
-}
-
-class PreAnalyzedUpdateProcessor extends FieldMutatingUpdateProcessor {
-
-  private PreAnalyzedField parser;
-  private IndexSchema schema;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public PreAnalyzedUpdateProcessor(
-      FieldNameSelector sel,
-      UpdateRequestProcessor next,
-      IndexSchema schema,
-      PreAnalyzedField parser) {
-    super(sel, next);
-    this.schema = schema;
-    this.parser = parser;
-  }
-
-  @Override
-  protected SolrInputField mutate(SolrInputField src) {
-    SchemaField sf = schema.getFieldOrNull(src.getName());
-    if (sf == null) { // remove this field
-      return null;
-    }
-    FieldType type = PreAnalyzedField.createFieldType(sf);
-    if (type == null) { // neither indexed nor stored - skip
-      return null;
-    }
-    SolrInputField res = new SolrInputField(src.getName());
-    for (Object o : src) {
-      if (o == null) {
-        continue;
-      }
-      Field pre = (Field) parser.createField(sf, o);
-      if (pre != null) {
-        res.addValue(pre);
-      } else { // restore the original value
-        log.warn("Could not parse field {} - using original value as is: {}", 
src.getName(), o);
-        res.addValue(o);
-      }
-    }
-    return res;
-  }
-}
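
For readers tracking this removal: a minimal SolrJ sketch of how a client could have routed a document through one of the chains shown in the factory's javadoc above. The base URL, collection name, and field name are illustrative assumptions, not part of this commit.

[source,java]
----
import org.apache.solr.client.solrj.impl.Http2SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class PreAnalyzedClientSketch {
  public static void main(String[] args) throws Exception {
    // Assumed base URL and collection name, for illustration only.
    try (Http2SolrClient client =
        new Http2SolrClient.Builder("http://localhost:8983/solr").build()) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", "1");
      // A value in the JSON form understood by JsonPreAnalyzedParser:
      // a stored string plus two tokens.
      doc.addField("title",
          "{\"v\":\"1\",\"str\":\"string value\",\"tokens\":[{\"t\":\"foo\"},{\"t\":\"bar\"}]}");

      UpdateRequest req = new UpdateRequest();
      req.add(doc);
      // Select the (now removed) pre-analyzed chain per request.
      req.setParam("update.chain", "pre-analyzed-json");
      req.process(client, "collection1");
      client.commit("collection1");
    }
  }
}
----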
diff --git 
a/solr/core/src/test-files/solr/collection1/conf/schema-preanalyzed.xml 
b/solr/core/src/test-files/solr/collection1/conf/schema-preanalyzed.xml
deleted file mode 100644
index dd286865d76..00000000000
--- a/solr/core/src/test-files/solr/collection1/conf/schema-preanalyzed.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<schema name="tiny" version="1.7">
-
-  <fieldType name="preanalyzed-no-analyzer" class="solr.PreAnalyzedField" 
parserImpl="json"/>
-  <fieldType name="preanalyzed-with-analyzer" class="solr.PreAnalyzedField">
-    <analyzer>
-      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="preanalyzed-with-query-analyzer" 
class="solr.PreAnalyzedField">
-    <analyzer type="query">
-      <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.LowerCaseFilterFactory"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="string" class="solr.StrField"/>
-  <fieldType name="long" class="${solr.tests.LongFieldType}" 
docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" 
positionIncrementGap="0"/>
-
-  <field name="id" type="string" indexed="true" stored="true" required="true"/>
-  <field name="_version_" type="long" indexed="true" stored="true" 
multiValued="false"/>
-  <field name="pre_no_analyzer" type="preanalyzed-no-analyzer" indexed="true" 
stored="true" multiValued="false"/>
-  <field name="pre_with_analyzer" type="preanalyzed-with-analyzer" 
indexed="true" stored="true" multiValued="false"/>
-  <field name="pre_with_query_analyzer" type="preanalyzed-with-query-analyzer" 
indexed="true" stored="true"
-         multiValued="false"/>
-
-  <uniqueKey>id</uniqueKey>
-
-</schema>
diff --git 
a/solr/core/src/test-files/solr/collection1/conf/solrconfig-update-processor-chains.xml
 
b/solr/core/src/test-files/solr/collection1/conf/solrconfig-update-processor-chains.xml
index 4e6d7bdd370..d9f96b3bcca 100644
--- 
a/solr/core/src/test-files/solr/collection1/conf/solrconfig-update-processor-chains.xml
+++ 
b/solr/core/src/test-files/solr/collection1/conf/solrconfig-update-processor-chains.xml
@@ -586,31 +586,6 @@
     </processor>
   </updateRequestProcessorChain>
 
-  <updateRequestProcessorChain name="pre-analyzed-simple">
-    <processor class="solr.PreAnalyzedUpdateProcessorFactory">
-      <str name="fieldName">subject</str>
-      <str name="fieldName">title</str>
-      <str name="fieldName">teststop</str>
-      <str name="fieldName">nonexistent</str>
-      <str name="fieldName">ssto</str>
-      <str name="fieldName">sind</str>
-      <str name="parser">simple</str>
-    </processor>
-    <processor class="solr.RunUpdateProcessorFactory" />
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="pre-analyzed-json">
-    <processor class="solr.PreAnalyzedUpdateProcessorFactory">
-      <str name="fieldName">subject</str>
-      <str name="fieldName">title</str>
-      <str name="fieldName">teststop</str>
-      <str name="fieldName">nonexistent</str>
-      <str name="fieldName">ssto</str>
-      <str name="fieldName">sind</str>
-      <str name="parser">json</str>
-    </processor>
-    <processor class="solr.RunUpdateProcessorFactory" />
-  </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="ignore-commit-from-client-403">
     <processor class="solr.IgnoreCommitOptimizeUpdateProcessorFactory"/>
diff --git 
a/solr/core/src/test-files/solr/configsets/cloud-managed-preanalyzed/conf/managed-schema.xml
 
b/solr/core/src/test-files/solr/configsets/cloud-managed-preanalyzed/conf/managed-schema.xml
deleted file mode 100644
index 338551bbdd7..00000000000
--- 
a/solr/core/src/test-files/solr/configsets/cloud-managed-preanalyzed/conf/managed-schema.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<schema name="managed-preanalyzed" version="1.7">
-  <fieldType name="string" class="solr.StrField"/>
-  <fieldType name="int" class="${solr.tests.IntegerFieldType}" 
docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" 
positionIncrementGap="0"/>
-  <fieldType name="long" class="${solr.tests.LongFieldType}" 
docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" 
positionIncrementGap="0"/>
-
-  <fieldType name="preanalyzed-no-analyzer" class="solr.PreAnalyzedField" 
parserImpl="json"/>
-  <fieldType name="preanalyzed-with-analyzer" class="solr.PreAnalyzedField">
-    <analyzer>
-      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="preanalyzed-with-query-analyzer" 
class="solr.PreAnalyzedField">
-    <analyzer type="query">
-      <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.LowerCaseFilterFactory"/>
-    </analyzer>
-  </fieldType>
-
-  <!-- for versioning -->
-  <field name="_version_" type="long" indexed="true" stored="true"/>
-  <field name="_root_" type="string" indexed="true" stored="true" 
multiValued="false" required="false"/>
-  <field name="id" type="string" indexed="true" stored="true"/>
-  <uniqueKey>id</uniqueKey>
-</schema>
diff --git 
a/solr/core/src/test-files/solr/configsets/cloud-managed-preanalyzed/conf/solrconfig.xml
 
b/solr/core/src/test-files/solr/configsets/cloud-managed-preanalyzed/conf/solrconfig.xml
deleted file mode 100644
index e4ef7eb551c..00000000000
--- 
a/solr/core/src/test-files/solr/configsets/cloud-managed-preanalyzed/conf/solrconfig.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0" ?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Minimal solrconfig.xml with /select, /admin and /update only -->
-
-<config>
-
-  <dataDir>${solr.data.dir:}</dataDir>
-
-  <directoryFactory name="DirectoryFactory"
-                    
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
-
-  <schemaFactory class="ManagedIndexSchemaFactory">
-    <bool name="mutable">${managed.schema.mutable:true}</bool>
-    <str name="managedSchemaResourceName">managed-schema.xml</str>
-  </schemaFactory>
-
-  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
-
-  <updateHandler class="solr.DirectUpdateHandler2">
-    <commitWithin>
-      <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
-    </commitWithin>
-    <updateLog></updateLog>
-  </updateHandler>
-
-  <requestHandler name="/select" class="solr.SearchHandler">
-    <lst name="defaults">
-      <str name="echoParams">explicit</str>
-      <str name="indent">true</str>
-      <str name="df">text</str>
-    </lst>
-
-  </requestHandler>
-</config>
diff --git 
a/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldManagedSchemaCloudTest.java
 
b/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldManagedSchemaCloudTest.java
deleted file mode 100644
index 39cdfb893f7..00000000000
--- 
a/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldManagedSchemaCloudTest.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.schema;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.schema.SchemaRequest;
-import 
org.apache.solr.client.solrj.response.schema.SchemaResponse.FieldResponse;
-import 
org.apache.solr.client.solrj.response.schema.SchemaResponse.UpdateResponse;
-import org.apache.solr.cloud.SolrCloudTestCase;
-import org.apache.solr.common.cloud.DocCollection;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class PreAnalyzedFieldManagedSchemaCloudTest extends SolrCloudTestCase {
-
-  private static final String COLLECTION = "managed-preanalyzed";
-  private static final String CONFIG = "cloud-managed-preanalyzed";
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(2).addConfig(CONFIG, configset(CONFIG)).configure();
-    CollectionAdminRequest.createCollection(COLLECTION, CONFIG, 2, 1)
-        .process(cluster.getSolrClient());
-    cluster
-        .getZkStateReader()
-        .waitForState(
-            COLLECTION,
-            DEFAULT_TIMEOUT,
-            TimeUnit.SECONDS,
-            (n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
-  }
-
-  @Test
-  public void testAdd2Fields() throws Exception {
-    addField(keyValueArrayToMap("name", "field1", "type", "string"));
-    addField(keyValueArrayToMap("name", "field2", "type", "string"));
-  }
-
-  private void addField(Map<String, Object> field) throws Exception {
-    CloudSolrClient client = cluster.getSolrClient();
-    UpdateResponse addFieldResponse = new 
SchemaRequest.AddField(field).process(client, COLLECTION);
-    assertNotNull(addFieldResponse);
-    assertEquals(0, addFieldResponse.getStatus());
-    assertNull(addFieldResponse.getResponse().get("errors"));
-    FieldResponse fieldResponse =
-        new SchemaRequest.Field(field.get("name").toString()).process(client, 
COLLECTION);
-    assertNotNull(fieldResponse);
-    assertEquals(0, fieldResponse.getStatus());
-  }
-
-  private Map<String, Object> keyValueArrayToMap(String... 
alternatingKeysAndValues) {
-    Map<String, Object> map = new HashMap<>();
-    for (int i = 0; i < alternatingKeysAndValues.length; i += 2)
-      map.put(alternatingKeysAndValues[i], alternatingKeysAndValues[i + 1]);
-    return map;
-  }
-}
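
The removed test only adds plain string fields. For completeness, here is a hedged sketch of what adding a field backed by one of the pre-analyzed types from the configset above could have looked like via the SolrJ Schema API; the field name and flags are assumptions.

[source,java]
----
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.schema.SchemaRequest;

public class AddPreAnalyzedFieldSketch {
  /** Adds a field using the "preanalyzed-no-analyzer" type defined in the removed managed-schema.xml. */
  static void addPreAnalyzedField(SolrClient client, String collection) throws Exception {
    Map<String, Object> field = new LinkedHashMap<>();
    field.put("name", "pre_json");                // hypothetical field name
    field.put("type", "preanalyzed-no-analyzer"); // type from the configset above
    field.put("indexed", true);
    field.put("stored", true);
    new SchemaRequest.AddField(field).process(client, collection);
  }
}
----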
diff --git 
a/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldTest.java 
b/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldTest.java
deleted file mode 100644
index f7e40cd575b..00000000000
--- a/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldTest.java
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.schema;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashMap;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.document.Field;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.schema.PreAnalyzedField.PreAnalyzedParser;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PreAnalyzedFieldTest extends SolrTestCaseJ4 {
-  private static final Logger log = 
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final String[] valid = {
-    "1 one two three", // simple parsing
-    "1  one  two   three ", // spurious spaces
-    "1 one,s=123,e=128,i=22  two three,s=20,e=22,y=foobar", // attribs
-    "1 \\ one\\ \\,,i=22,a=\\, two\\=\n\r\t\\n,\\ =\\   \\", // escape madness
-    "1 ,i=22 ,i=33,s=2,e=20 , ", // empty token text, non-empty attribs
-    "1 =This is the stored part with \\= \n \\n \t \\t escapes.=one two three  
\u0001ąćęłńóśźż", // stored plus token stream
-    "1 ==", // empty stored, no token stream
-    "1 =this is a test.=", // stored + empty token stream
-    "1 one,p=deadbeef two,p=0123456789abcdef three" // payloads
-  };
-
-  private static final String[] validParsed = {
-    "1 one,s=0,e=3 two,s=4,e=7 three,s=8,e=13",
-    "1 one,s=1,e=4 two,s=6,e=9 three,s=12,e=17",
-    "1 one,i=22,s=123,e=128,y=word two,i=1,s=5,e=8,y=word 
three,i=1,s=20,e=22,y=foobar",
-    "1 \\ one\\ \\,,i=22,s=0,e=6 two\\=\\n\\r\\t\\n,i=1,s=7,e=15 
\\\\,i=1,s=17,e=18",
-    "1 i=22,s=0,e=0 i=33,s=2,e=20 i=1,s=2,e=2",
-    "1 =This is the stored part with = \n \\n \t \\t escapes.=one,s=0,e=3 
two,s=4,e=7 three,s=8,e=13 \u0001ąćęłńóśźż,s=15,e=25",
-    "1 ==",
-    "1 =this is a test.=",
-    "1 one,p=deadbeef,s=0,e=3 two,p=0123456789abcdef,s=4,e=7 three,s=8,e=13"
-  };
-
-  private static final String[] invalidSimple = {
-    "one two three", // missing version #
-    "2 one two three", // invalid version #
-    "1 o,ne two", // missing escape
-    "1 one t=wo", // missing escape
-    "1 one,, two", // missing attribs, unescaped comma
-    "1 one,s ", // missing attrib value
-    "1 one,s= val", // missing attrib value, unescaped space
-    "1 one,s=,val", // unescaped comma
-    "1 =", // unescaped equals
-    "1 =stored ", // unterminated stored
-    "1 ===" // empty stored (ok), but unescaped = in token stream
-  };
-
-  private static final String validJson =
-      
json("{'v':'1','str':'stored-value','tokens':[{'t':'a'},{'t':'b'},{'t':'c'}]}");
-
-  private static final String[] invalidJson = {
-    // missing enclosing object
-    
json("'v':'1','str':'stored-value','tokens':[{'t':'a'},{'t':'b'},{'t':'c'}]"),
-    // missing version #
-    json("{'str':'stored-value','tokens':[{'t':'a'},{'t':'b'},{'t':'c'}]}"),
-    // invalid version #
-    
json("{'v':'2','str':'stored-value','tokens':[{'t':'a'},{'t':'b'},{'t':'c'}]}"),
-    // single token no attribs
-    json("{'v':'1','str':'stored-value','tokens':[{}]}"),
-    // missing attrib value
-    json("{'v':'1','str':'stored-value','tokens':[{'t'}]}"),
-  };
-
-  SchemaField field = null;
-  int props = FieldProperties.INDEXED | FieldProperties.STORED;
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    initCore("solrconfig-minimal.xml", "schema-preanalyzed.xml");
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    field = new SchemaField("content", new TextField(), props, null);
-  }
-
-  @Test
-  public void testValidSimple() {
-    PreAnalyzedField paf = new PreAnalyzedField();
-    // use Simple format
-    HashMap<String, String> args = new HashMap<>();
-    args.put(PreAnalyzedField.PARSER_IMPL, 
SimplePreAnalyzedParser.class.getName());
-    paf.init(h.getCore().getLatestSchema(), args);
-    PreAnalyzedParser parser = new SimplePreAnalyzedParser();
-    for (int i = 0; i < valid.length; i++) {
-      String s = valid[i];
-      try {
-        Field f = (Field) paf.fromString(field, s);
-        // System.out.println(" - toString: '" + sb.toString() + "'");
-        assertEquals(validParsed[i], parser.toFormattedString(f));
-      } catch (Exception e) {
-        log.error("Should pass: '{}', exception", s, e);
-        fail("Should pass: '" + s + "', exception: " + e);
-      }
-    }
-  }
-
-  private String addTwoDocs(int firstId, String field) {
-    return "<add>\n"
-        + doc(
-            "id",
-            Integer.toString(firstId),
-            field,
-            json(
-                "{'v':'1','str':'document 
one','tokens':[{'t':'one'},{'t':'two'},{'t':'three','i':100}]}"))
-        + doc(
-            "id",
-            Integer.toString(firstId + 1),
-            field,
-            json(
-                "{'v':'1','str':'document 
two','tokens':[{'t':'eleven'},{'t':'twelve'},{'t':'thirteen','i':110}]}"))
-        + "</add>\n";
-  }
-
-  @Test
-  public void testIndexAndQueryNoSchemaAnalyzer() {
-    assertU(addTwoDocs(1, "pre_no_analyzer"));
-    assertU(commit());
-    assertQ(
-        req("q", "id:(1 2)", "sort", "id asc"),
-        "//result[@numFound='2']",
-        "//result/doc[1]/str[@name='id'][.='1']",
-        "//result/doc[1]/str[@name='pre_no_analyzer'][.='document one']",
-        "//result/doc[2]/str[@name='id'][.='2']",
-        "//result/doc[2]/str[@name='pre_no_analyzer'][.='document two']");
-    assertQ(
-        req("q", "{!field 
f='pre_no_analyzer'}{'v':'1','tokens':[{'t':'two'}]}"),
-        "//result[@numFound='1']");
-    assertQ(
-        req("q", "{!field 
f='pre_no_analyzer'}{'v':'1','tokens':[{'t':'eleven'},{'t':'twelve'}]}"),
-        "//result[@numFound='1']");
-  }
-
-  @Test
-  public void testIndexAndQueryWithSchemaAnalyzer() {
-    assertU(addTwoDocs(3, "pre_with_analyzer"));
-    assertU(commit());
-    assertQ(
-        req("q", "id:(3 4)", "sort", "id asc"),
-        "//result[@numFound='2']",
-        "//result/doc[1]/str[@name='id'][.='3']",
-        "//result/doc[1]/str[@name='pre_with_analyzer'][.='document one']",
-        "//result/doc[2]/str[@name='id'][.='4']",
-        "//result/doc[2]/str[@name='pre_with_analyzer'][.='document two']");
-    assertQ(req("q", "pre_with_analyzer:(+two +three)"), 
"//result[@numFound='1']");
-    assertQ(req("q", "pre_with_analyzer:(+eleven +twelve)"), 
"//result[@numFound='1']");
-  }
-
-  @Test
-  public void testIndexAndQueryWithSchemaQueryAnalyzer() {
-    assertU(addTwoDocs(5, "pre_with_query_analyzer"));
-    assertU(commit());
-    assertQ(
-        req("q", "id:(5 6)", "sort", "id asc"),
-        "//result[@numFound='2']",
-        "//result/doc[1]/str[@name='id'][.='5']",
-        "//result/doc[1]/str[@name='pre_with_query_analyzer'][.='document 
one']",
-        "//result/doc[2]/str[@name='id'][.='6']",
-        "//result/doc[2]/str[@name='pre_with_query_analyzer'][.='document 
two']");
-    assertQ(req("q", "pre_with_query_analyzer:one,two"), 
"//result[@numFound='1']");
-    assertQ(req("q", "pre_with_query_analyzer:eleven,twelve"), 
"//result[@numFound='1']");
-  }
-
-  @Test
-  public void testInvalidSimple() {
-    PreAnalyzedField paf = new PreAnalyzedField();
-    paf.init(h.getCore().getLatestSchema(), Collections.<String, 
String>emptyMap());
-    for (String s : invalidSimple) {
-      try {
-        paf.fromString(field, s);
-        fail("should fail: '" + s + "'");
-      } catch (Exception e) {
-        //
-      }
-    }
-  }
-
-  public void testInvalidJson() throws Exception {
-    PreAnalyzedField paf = new PreAnalyzedField();
-    paf.init(h.getCore().getLatestSchema(), Collections.emptyMap());
-    Analyzer preAnalyzer = paf.getIndexAnalyzer();
-    for (String s : invalidJson) {
-      TokenStream stream = null;
-      try {
-        stream = preAnalyzer.tokenStream("dummy", s);
-        stream.reset(); // exception should be triggered here.
-        fail("should fail: '" + s + "'");
-      } catch (Exception e) {
-        // expected
-      } finally {
-        if (stream != null) {
-          stream.close();
-        }
-      }
-    }
-    // make sure the analyzer can now handle properly formatted input
-    TokenStream stream = preAnalyzer.tokenStream("dummy", validJson);
-    CharTermAttribute termAttr = stream.addAttribute(CharTermAttribute.class);
-    stream.reset();
-    while (stream.incrementToken()) {
-      assertNotEquals("zero-length token", 0, termAttr.length());
-    }
-    stream.end();
-    stream.close();
-  }
-
-  // "1 =test ąćęłńóśźż \u0001=one,i=22,s=123,e=128,p=deadbeef,y=word 
two,i=1,s=5,e=8,y=word
-  // three,i=1,s=20,e=22,y=foobar"
-
-  private static final String jsonValid =
-      "{\"v\":\"1\",\"str\":\"test ąćęłńóśźż\",\"tokens\":["
-          + 
"{\"e\":128,\"i\":22,\"p\":\"DQ4KDQsODg8=\",\"s\":123,\"t\":\"one\",\"y\":\"word\"},"
-          + "{\"e\":8,\"i\":1,\"s\":5,\"t\":\"two\",\"y\":\"word\"},"
-          + "{\"e\":22,\"i\":1,\"s\":20,\"t\":\"three\",\"y\":\"foobar\"}"
-          + "]}";
-
-  @Test
-  public void testParsers() throws Exception {
-    PreAnalyzedField paf = new PreAnalyzedField();
-    // use Simple format
-    HashMap<String, String> args = new HashMap<>();
-    args.put(PreAnalyzedField.PARSER_IMPL, 
SimplePreAnalyzedParser.class.getName());
-    paf.init(h.getCore().getLatestSchema(), args);
-    {
-      Field f = (Field) paf.fromString(field, valid[0]);
-    }
-
-    // use JSON format
-    args.put(PreAnalyzedField.PARSER_IMPL, 
JsonPreAnalyzedParser.class.getName());
-    paf.init(h.getCore().getLatestSchema(), args);
-    expectThrows(Exception.class, () -> paf.fromString(field, valid[0]));
-
-    byte[] deadbeef =
-        new byte[] {
-          (byte) 0xd,
-          (byte) 0xe,
-          (byte) 0xa,
-          (byte) 0xd,
-          (byte) 0xb,
-          (byte) 0xe,
-          (byte) 0xe,
-          (byte) 0xf
-        };
-    PreAnalyzedParser parser = new JsonPreAnalyzedParser();
-
-    {
-      Field f = (Field) paf.fromString(field, jsonValid);
-      assertEquals(jsonValid, parser.toFormattedString(f));
-    }
-  }
-}
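
One relationship in the test above is easy to miss: the `p` (payload) attribute in the JSON format is Base64-encoded, so the `deadbeef` nibble bytes in `testParsers` are the same data as the `DQ4KDQsODg8=` payload in `jsonValid`. A standalone sketch of that encoding (class name is illustrative):

[source,java]
----
import java.util.Base64;

public class PayloadEncodingSketch {
  public static void main(String[] args) {
    // Same byte values as the "deadbeef" array in the removed test.
    byte[] payload = {0xd, 0xe, 0xa, 0xd, 0xb, 0xe, 0xe, 0xf};
    // JsonPreAnalyzedParser carries payloads as Base64 text.
    System.out.println(Base64.getEncoder().encodeToString(payload)); // DQ4KDQsODg8=
  }
}
----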
diff --git 
a/solr/core/src/test/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorTest.java
 
b/solr/core/src/test/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorTest.java
deleted file mode 100644
index c18a3a85ecb..00000000000
--- 
a/solr/core/src/test/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorTest.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.update.processor;
-
-import org.apache.lucene.document.Field;
-import org.apache.solr.common.SolrInputDocument;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class PreAnalyzedUpdateProcessorTest extends UpdateProcessorTestBase {
-  String[] simpleTitle = new String[] {"not pre-analyzed", "1 =string 
value=foo bar"};
-  String[] jsonTitle =
-      new String[] {
-        "not pre-analyzed",
-        "{\"v\":\"1\",\"str\":\"string 
value\",\"tokens\":[{\"t\":\"foo\"},{\"t\":\"bar\"}]}",
-      };
-  String[] simpleTeststop =
-      new String[] {"1 =this is a test.=one two three", "1 =this is a 
test.=three four five"};
-  String[] jsonTeststop =
-      new String[] {
-        "{\"v\":\"1\",\"str\":\"this is a 
test.\",\"tokens\":[{\"t\":\"one\"},{\"t\":\"two\"},{\"t\":\"three\"}]}",
-        "{\"v\":\"1\",\"str\":\"this is a 
test.\",\"tokens\":[{\"t\":\"three\"},{\"t\":\"four\"},{\"t\":\"five\"}]}",
-      };
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    initCore("solrconfig-update-processor-chains.xml", "schema12.xml");
-  }
-
-  @Test
-  public void testSimple() throws Exception {
-    test("pre-analyzed-simple", simpleTitle, simpleTeststop);
-  }
-
-  @Test
-  public void testJson() throws Exception {
-    test("pre-analyzed-json", jsonTitle, jsonTeststop);
-  }
-
-  private void test(String chain, String[] title, String[] teststop) throws 
Exception {
-    SolrInputDocument doc =
-        processAdd(
-            chain,
-            doc(
-                f("id", "1"),
-                f("title", title[0]),
-                f("teststop", teststop[0]),
-                f("nonexistent", "foobar"),
-                f("ssto", teststop[0]),
-                f("sind", teststop[0])));
-    assertEquals("title should be unchanged", title[0], 
doc.getFieldValue("title"));
-    assertTrue("teststop should be a Field", doc.getFieldValue("teststop") 
instanceof Field);
-    Field f = (Field) doc.getFieldValue("teststop");
-    assertEquals("teststop should have stringValue", "this is a test.", 
f.stringValue());
-    assertNotNull("teststop should have tokensStreamValue", 
f.tokenStreamValue());
-    assertNull("nonexistent should be dropped", doc.getField("nonexistent"));
-    // check how SchemaField type affects stored/indexed part processing
-    f = (Field) doc.getFieldValue("ssto");
-    assertNotNull("should have ssto", f);
-    assertNotNull("should have stringValue", f.stringValue());
-    assertNull("should not have tokenStreamValue", f.tokenStreamValue());
-    f = (Field) doc.getFieldValue("sind");
-    assertNotNull("should have sind", f);
-    assertNull("should not have stringValue: '" + f.stringValue() + "'", 
f.stringValue());
-    assertNotNull("should have tokenStreamValue", f.tokenStreamValue());
-
-    doc =
-        processAdd(
-            chain,
-            doc(
-                f("id", "2"),
-                f("title", title[1]),
-                f("teststop", teststop[1]),
-                f("nonexistent", "foobar"),
-                f("ssto", teststop[1]),
-                f("sind", teststop[1])));
-    assertTrue("title should be a Field", doc.getFieldValue("title") 
instanceof Field);
-    assertTrue("teststop should be a Field", doc.getFieldValue("teststop") 
instanceof Field);
-    f = (Field) doc.getFieldValue("teststop");
-    assertEquals("teststop should have stringValue", "this is a test.", 
f.stringValue());
-    assertNotNull("teststop should have tokensStreamValue", 
f.tokenStreamValue());
-    assertNull("nonexistent should be dropped", doc.getField("nonexistent"));
-    // check how SchemaField type affects stored/indexed part processing
-    f = (Field) doc.getFieldValue("ssto");
-    assertNotNull("should have ssto", f);
-    assertNotNull("should have stringValue", f.stringValue());
-    assertNull("should not have tokenStreamValue", f.tokenStreamValue());
-    f = (Field) doc.getFieldValue("sind");
-    assertNotNull("should have sind", f);
-    assertNull("should not have stringValue: '" + f.stringValue() + "'", 
f.stringValue());
-    assertNotNull("should have tokenStreamValue", f.tokenStreamValue());
-
-    assertU(commit());
-    assertQ(
-        req("teststop:\"one two three\""),
-        "//str[@name='id'][.='1']",
-        "//str[@name='teststop'][.='this is a test.']");
-    assertQ(
-        req("teststop:three"),
-        "//*[@numFound='2']",
-        "//result/doc[1]/str[@name='id'][.='1']",
-        "//result/doc[1]/str[@name='title'][.='not pre-analyzed']",
-        "//result/doc[2]/str[@name='id'][.='2']",
-        "//result/doc[2]/arr[@name='title']/str[.='string value']");
-    assertQ(req("ssto:three"), "//*[@numFound='0']");
-    assertQ(req("sind:three"), "//*[@numFound='2']");
-  }
-}
diff --git 
a/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema.xml
 
b/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema.xml
index cb521ecf914..4ff07b39c21 100644
--- 
a/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema.xml
+++ 
b/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema.xml
@@ -132,7 +132,6 @@
    -->
    <field name="id" type="string" indexed="true" stored="true" required="true" 
multiValued="false" />
 
-   <field name="pre" type="preanalyzed" indexed="true" stored="true"/>
    <field name="sku" type="text_en_splitting_tight" indexed="true" 
stored="true" omitNorms="true"/>
    <field name="name" type="text_general" indexed="true" stored="true" 
uninvertible="true"/>
    <field name="manu" type="text_gen_sort" indexed="true" stored="true" 
omitNorms="true" multiValued="false" useDocValuesAsStored="true"/>
@@ -1181,13 +1180,6 @@
       </analyzer>
     </fieldType>
 
-    <!-- Pre-analyzed field type, allows inserting arbitrary token streams and 
stored values. -->
-    <fieldType name="preanalyzed" class="solr.PreAnalyzedField">
-      <!-- PreAnalyzedField's builtin index analyzer just decodes the 
pre-analyzed token stream. -->
-      <analyzer type="query">
-        <tokenizer name="whitespace"/>
-      </analyzer>
-    </fieldType>
 
   <!-- Similarity is the scoring routine for each document vs. a query.
        A custom Similarity or SimilarityFactory may be specified here, but
diff --git 
a/solr/solr-ref-guide/modules/configuration-guide/pages/update-request-processors.adoc
 
b/solr/solr-ref-guide/modules/configuration-guide/pages/update-request-processors.adoc
index 4a0b2a801f9..c763f539cfb 100644
--- 
a/solr/solr-ref-guide/modules/configuration-guide/pages/update-request-processors.adoc
+++ 
b/solr/solr-ref-guide/modules/configuration-guide/pages/update-request-processors.adoc
@@ -398,8 +398,6 @@ When using any of these factories, please consult the 
{solr-javadocs}/core/org/a
 
 
{solr-javadocs}/core/org/apache/solr/update/processor/ParseLongFieldUpdateProcessorFactory.html[ParseLongFieldUpdateProcessorFactory]:::
 Attempts to mutate selected fields that have only CharSequence-typed values 
into Long values.
 
-{solr-javadocs}/core/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorFactory.html[PreAnalyzedUpdateProcessorFactory]::
 An update processor that parses configured fields of any document being added 
using _PreAnalyzedField_ with the configured format parser.
-
 
{solr-javadocs}/core/org/apache/solr/update/processor/RegexReplaceProcessorFactory.html[RegexReplaceProcessorFactory]::
 An update processor that applies a configured regex to any CharSequence 
values found in the selected fields, and replaces any matches with the 
configured replacement string.
 
 
{solr-javadocs}/core/org/apache/solr/update/processor/RemoveBlankFieldUpdateProcessorFactory.html[RemoveBlankFieldUpdateProcessorFactory]::
 Removes any values found which are CharSequence with a length of 0 (i.e., 
empty strings).
diff --git a/solr/solr-ref-guide/modules/indexing-guide/indexing-nav.adoc 
b/solr/solr-ref-guide/modules/indexing-guide/indexing-nav.adoc
index e78233d4a5d..9b50849716c 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/indexing-nav.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/indexing-nav.adoc
@@ -32,7 +32,6 @@
 *** xref:currencies-exchange-rates.adoc[]
 *** xref:date-formatting-math.adoc[]
 *** xref:enum-fields.adoc[]
-*** xref:external-files-processes.adoc[]
 *** xref:field-properties-by-use-case.adoc[]
 ** xref:copy-fields.adoc[]
 ** xref:dynamic-fields.adoc[]
diff --git 
a/solr/solr-ref-guide/modules/indexing-guide/pages/external-files-processes.adoc
 
b/solr/solr-ref-guide/modules/indexing-guide/pages/external-files-processes.adoc
deleted file mode 100644
index 77029ceaac0..00000000000
--- 
a/solr/solr-ref-guide/modules/indexing-guide/pages/external-files-processes.adoc
+++ /dev/null
@@ -1,251 +0,0 @@
-= External Files and Processes
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-Solr can consume a stream of tokens that have already undergone analysis with 
a field type called the `PreAnalyzedFieldType`.
-
-== The PreAnalyzedField Type
-
-The `PreAnalyzedField` type provides a way to send to Solr serialized token 
streams, optionally with independent stored values of a field, and have this 
information stored and indexed without any additional text processing applied 
in Solr.
-This is useful if a user wants to submit field content that was already processed by some existing external text processing pipeline (e.g., it has been tokenized, annotated, stemmed, or had synonyms inserted), while using all the rich attributes that Lucene's TokenStream provides (per-token attributes).
-
-The serialization format is pluggable using implementations of the PreAnalyzedParser interface.
-There are two out-of-the-box implementations:
-
-* <<JsonPreAnalyzedParser>>: as the name suggests, it parses content that uses JSON to represent a field's content.
-This is the default parser used if the field type is not configured otherwise.
-* <<SimplePreAnalyzedParser>>: uses a simple strict plain text format, which 
in some situations may be easier to create than JSON.
-
-There is only one configuration parameter, `parserImpl`.
-The value of this parameter should be the fully qualified class name of a class that implements the PreAnalyzedParser interface.
-The default value of this parameter is 
`org.apache.solr.schema.JsonPreAnalyzedParser`.
-
-By default, the query-time analyzer for fields of this type will be the same 
as the index-time analyzer, which expects serialized pre-analyzed text.
-You must add a query type analyzer to your fieldType in order to perform 
analysis on non-pre-analyzed queries.
-In the example below, the index-time analyzer expects the default JSON 
serialization format, and the query-time analyzer will employ 
StandardTokenizer/LowerCaseFilter:
-
-[source,xml]
-----
-<fieldType name="pre_with_query_analyzer" class="solr.PreAnalyzedField">
-  <analyzer type="query">
-    <tokenizer class="solr.StandardTokenizerFactory"/>
-    <filter class="solr.LowerCaseFilterFactory"/>
-  </analyzer>
-</fieldType>
-----
-
-=== JsonPreAnalyzedParser
-
-This is the default serialization format used by PreAnalyzedField type.
-It uses a top-level JSON map with the following keys:
-
-[%autowidth.stretch,options="header"]
-|===
-|Key |Description |Required
-|`v` |Version key. Currently the supported version is `1`. |required
-|`str` |Stored string value of a field. You can use at most one of `str` or 
`bin`. |optional
-|`bin` |Stored binary value of a field. The binary value has to be Base64 
encoded. |optional
-|`tokens` |serialized token stream. This is a JSON list. |optional
-|===
-
-Any other top-level key is silently ignored.
-
-==== Token Stream Serialization
-
-The token stream is expressed as a JSON list of JSON maps.
-The map for each token consists of the following keys and values:
-
-[%autowidth.stretch,options="header"]
-|===
-|Key |Description |Lucene Attribute |Value |Required?
-|`t` |token 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/CharTermAttribute.html[CharTermAttribute]
 |UTF-8 string representing the current token |required
-|`s` |start offset 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/OffsetAttribute.html[OffsetAttribute]
 |Non-negative integer |optional
-|`e` |end offset |OffsetAttribute |Non-negative integer |optional
-|`i` |position increment 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.html[PositionIncrementAttribute]
 |Non-negative integer - default is `1` |optional
-|`p` |payload 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.html[PayloadAttribute]
 |Base64 encoded payload |optional
-|`y` |lexical type 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/TypeAttribute.html[TypeAttribute]
 |UTF-8 string |optional
-|`f` |flags 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/FlagsAttribute.html[FlagsAttribute]
 |String representing an integer value in hexadecimal format |optional
-|===
-
-Any other key is silently ignored.
-
-==== JsonPreAnalyzedParser Example
-
-[source,json]
-----
-{
-  "v":"1",
-  "str":"test ąćęłńóśźż",
-  "tokens": [
-    {"t":"two","s":5,"e":8,"i":1,"y":"word"},
-    {"t":"three","s":20,"e":22,"i":1,"y":"foobar"},
-    {"t":"one","s":123,"e":128,"i":22,"p":"DQ4KDQsODg8=","y":"word"}
-  ]
-}
-----
-
-=== SimplePreAnalyzedParser
-
-The fully qualified class name to use when specifying this format via the 
`parserImpl` configuration parameter is 
`org.apache.solr.schema.SimplePreAnalyzedParser`.
-
-==== SimplePreAnalyzedParser Syntax
-
-The serialization format supported by this parser is as follows:
-
-.Serialization format
-[source,text]
-----
-content ::= version (stored)? tokens
-version ::= digit+ " "
-; stored field value - any "=" inside must be escaped!
-stored ::= "=" text "="
-tokens ::= (token ((" ") + token)*)*
-token ::= text ("," attrib)*
-attrib ::= name '=' value
-name ::= text
-value ::= text
-----
-
-Special characters in "text" values can be escaped using the escape character 
`\`.
-The following escape sequences are recognized:
-
-[width="60%",options="header",]
-|===
-|EscapeSequence |Description
-|`\ ` |literal space character
-|`\,` |literal `,` character
-|`\=` |literal `=` character
-|`\\` |literal `\` character
-|`\n` |newline
-|`\r` |carriage return
-|`\t` |horizontal tab
-|===
-
-Please note that Unicode sequences (e.g., `\u0001`) are not supported.
-
-==== Supported Attributes
-
-The following token attributes are supported, and identified with short 
symbolic names:
-
-[%autowidth.stretch,options="header"]
-|===
-|Name |Description |Lucene attribute |Value format
-|`i` |position increment 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.html[PositionIncrementAttribute]
 |integer
-|`s` |start offset 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/OffsetAttribute.html[OffsetAttribute]
 |integer
-|`e` |end offset |OffsetAttribute |integer
-|`y` |lexical type 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/TypeAttribute.html[TypeAttribute]
 |string
-|`f` |flags 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/FlagsAttribute.html[FlagsAttribute]
 |hexadecimal integer
-|`p` |payload 
|{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.html[PayloadAttribute]
 |bytes in hexadecimal format; whitespace is ignored
-|===
-
-Token positions are tracked and implicitly added to the token stream - the 
start and end offsets consider only the term text and whitespace, and exclude 
the space taken by token attributes.
-
-==== Example Token Streams
-
-// TODO: in cwiki each of these examples was in its own "panel" ... do we want 
something like that here?
-// TODO: these examples match what was in cwiki, but I'm honestly not sure if 
the formatting there was correct to start?
-
-[source,text]
-----
-1 one two three
-----
-
-* version: 1
-* stored: null
-* token: (term=`one`,startOffset=0,endOffset=3)
-* token: (term=`two`,startOffset=4,endOffset=7)
-* token: (term=`three`,startOffset=8,endOffset=13)
-
-[source,text]
-----
-1 one  two    three
-----
-
-* version: 1
-* stored: null
-* token: (term=`one`,startOffset=0,endOffset=3)
-* token: (term=`two`,startOffset=5,endOffset=8)
-* token: (term=`three`,startOffset=11,endOffset=16)
-
-[source,text]
-----
-1 one,s=123,e=128,i=22 two three,s=20,e=22
-----
-
-* version: 1
-* stored: null
-* token: (term=`one`,positionIncrement=22,startOffset=123,endOffset=128)
-* token: (term=`two`,positionIncrement=1,startOffset=5,endOffset=8)
-* token: (term=`three`,positionIncrement=1,startOffset=20,endOffset=22)
-
-[source,text]
-----
-1 \ one\ \,,i=22,a=\, two\=
-
-\n,\ =\ \
-----
-
-* version: 1
-* stored: null
-* token: (term=`one ,`,positionIncrement=22,startOffset=0,endOffset=6)
-* token: (term=`two=` ,positionIncrement=1,startOffset=7,endOffset=15)
-* token: (term=`\`,positionIncrement=1,startOffset=17,endOffset=18)
-
-Note that unknown attributes and their values are ignored, so in this example, 
the "```a```" attribute on the first token and the " " (escaped space) 
attribute on the second token are ignored, along with their values, because 
they are not among the supported attribute names.
-
-[source,text]
-----
-1 ,i=22 ,i=33,s=2,e=20 ,
-----
-
-* version: 1
-* stored: null
-* token: (term=,positionIncrement=22,startOffset=0,endOffset=0)
-* token: (term=,positionIncrement=33,startOffset=2,endOffset=20)
-* token: (term=,positionIncrement=1,startOffset=2,endOffset=2)
-
-[source,text]
-----
-1 =This is the stored part with \=
-\n \t escapes.=one two three
-----
-
-* version: 1
-* stored: `This is the stored part with =   \t escapes.`
-* token: (term=`one`,startOffset=0,endOffset=3)
-* token: (term=`two`,startOffset=4,endOffset=7)
-* token: (term=`three`,startOffset=8,endOffset=13)
-
-Note that the `\t` in the above stored value is not literal; it's shown that 
way to visually indicate the actual tab char that is in the stored value.
-
-[source,text]
-----
-1 ==
-----
-
-* version: 1
-* stored: ""
-* (no tokens)
-
-[source,text]
-----
-1 =this is a test.=
-----
-
-* version: 1
-* stored: `this is a test.`
-* (no tokens)
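
Since the page above is being removed, a compact sketch of producing the documented JSON serialization programmatically may help anyone preserving this behavior elsewhere. The field name and helper are hypothetical, the JSON is built by hand (no particular library is implied), and the tokens are assumed to contain no characters needing JSON escaping.

[source,java]
----
import org.apache.solr.common.SolrInputDocument;

public class JsonPreAnalyzedValueSketch {

  /** Builds a value in the JsonPreAnalyzedParser format documented above (version "1"). */
  static String preAnalyzedJson(String stored, String... terms) {
    StringBuilder sb =
        new StringBuilder("{\"v\":\"1\",\"str\":\"").append(stored).append("\",\"tokens\":[");
    int offset = 0;
    for (int i = 0; i < terms.length; i++) {
      if (i > 0) {
        sb.append(',');
      }
      // "t" is the term text, "s"/"e" are start/end offsets, "i" is the position increment.
      sb.append("{\"t\":\"").append(terms[i]).append("\",\"s\":").append(offset)
          .append(",\"e\":").append(offset + terms[i].length()).append(",\"i\":1}");
      offset += terms[i].length() + 1; // assume a single space between terms
    }
    return sb.append("]}").toString();
  }

  public static void main(String[] args) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "1");
    // "pre" is a hypothetical field of a pre-analyzed type.
    doc.addField("pre", preAnalyzedJson("one two three", "one", "two", "three"));
    System.out.println(doc.getFieldValue("pre"));
  }
}
----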
diff --git 
a/solr/solr-ref-guide/modules/indexing-guide/pages/field-types-included-with-solr.adoc
 
b/solr/solr-ref-guide/modules/indexing-guide/pages/field-types-included-with-solr.adoc
index e7f42d8ec45..082318a6754 100644
--- 
a/solr/solr-ref-guide/modules/indexing-guide/pages/field-types-included-with-solr.adoc
+++ 
b/solr/solr-ref-guide/modules/indexing-guide/pages/field-types-included-with-solr.adoc
@@ -59,10 +59,6 @@ The 
{solr-javadocs}/core/org/apache/solr/schema/package-summary.html[`org.apache
 
 |PointType |A single-valued n-dimensional point. It's both for sorting spatial 
data that is _not_ lat-lon, and for some more rare use-cases. (NOTE: this is 
_not_ related to the "Point" based numeric fields). See 
xref:query-guide:spatial-search.adoc[] for more information.
 
-|PreAnalyzedField |Provides a way to send to Solr serialized token streams, 
optionally with independent stored values of a field, and have this information 
stored and indexed without any additional text processing.
-
-Configuration and usage of PreAnalyzedField is documented in the section  
xref:external-files-processes.adoc#the-preanalyzedfield-type[PreAnalyzedField 
Type].
-
 |RandomSortField |Does not contain a value. Queries that sort on this field 
type will return results in random order. Use a dynamic field to use this 
feature.
 
 |RankField |Can be used to store scoring factors to improve document ranking. 
To be used in combination with 
xref:query-guide:other-parsers.adoc#ranking-query-parser[RankQParserPlugin].
diff --git 
a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-10.adoc 
b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-10.adoc
index 88c6058531c..bbbda70b6c1 100644
--- 
a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-10.adoc
+++ 
b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-10.adoc
@@ -116,6 +116,8 @@ Nowadays, the HTTP request is available via internal APIs: 
`SolrQueryRequest.get
 
 * EnumField has been removed.  Users should migrate to the `EnumFieldType` 
implementation.
 
+* PreAnalyzedField and PreAnalyzedUpdateProcessor have been removed due to 
incompatibility with Lucene 10 (SOLR-17839).
+
 
 === Security
 
