@inproceedings{1a10cdb1946b4d7f87387e54a7c39e6a,
  author    = {Giles, C. Lee and Omlin, Christian W.},
  title     = {Rule Refinement with Recurrent Neural Networks},
  booktitle = {1993 {IEEE} International Conference on Neural Networks, {ICNN} 1993},
  series    = {{IEEE} International Conference on Neural Networks - Conference Proceedings},
  pages     = {801--806},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  year      = {1993},
  doi       = {10.1109/ICNN.1993.298658},
  language  = {English (US)},
  abstract  = {Recurrent neural networks can be trained to behave like deterministic finite-state automata (DFA's) and methods have been developed for extracting grammatical rules from trained networks. Using a simple method for inserting prior knowledge of a subset of the DFA state transitions into recurrent neural networks, we show that recurrent neural networks are able to perform rule refinement. The results from training a recurrent neural network to recognize a known non-trivial, randomly generated regular grammar show that not only do the networks preserve correct prior knowledge, but that they are able to correct through training inserted prior knowledge which was wrong. (By wrong, we mean that the inserted rules were not the ones in the randomly generated grammar.)},
  note      = {Publisher Copyright: {\textcopyright} 1993 IEEE.; IEEE International Conference on Neural Networks, ICNN 1993 ; Conference date: 28-03-1993 Through 01-04-1993},
}