ctakes-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From vjapa...@apache.org
Subject svn commit: r1555281 [1/3] - in /ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima: annotators/ dao/ lookup/ lookup/ae/ mapper/ model/ resource/
Date Fri, 03 Jan 2014 23:22:59 GMT
Author: vjapache
Date: Fri Jan  3 23:22:58 2014
New Revision: 1555281

URL: http://svn.apache.org/r1555281
Log:
ytex upgrade

Added:
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DBConsumer.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DateAnnotator.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/MetaMapToCTakesAnnotator.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NamedEntityRegexAnnotator.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NegexAnnotator.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SegmentRegexAnnotator.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SenseDisambiguatorAnnotator.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SentenceDetector.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SentenceSpan.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/dao/
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/dao/DocumentDao.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/dao/DocumentDaoImpl.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/dao/NamedEntityRegexDao.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/dao/NamedEntityRegexDaoImpl.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/dao/SegmentRegexDao.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/dao/SegmentRegexDaoImpl.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/lookup/
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/lookup/ae/
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/lookup/ae/FirstTokenPermLookupInitializerImpl.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/lookup/ae/StemmedLookupAnnotationToJCasAdapter.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/mapper/
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/mapper/AnnoMappingInfo.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/mapper/ColumnMappingInfo.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/mapper/DocumentMapperService.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/mapper/DocumentMapperServiceImpl.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/mapper/ISO8601Converter.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/mapper/MapperConfig.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/AnnotationContainmentLink.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/BooleanUtil.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/Document.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/DocumentAnnotation.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/DocumentClass.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/NamedEntityRegex.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/SegmentRegex.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/model/UimaType.java
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/resource/
    ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/resource/InitableJdbcConnectionResourceImpl.java

Added: ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DBConsumer.java
URL: http://svn.apache.org/viewvc/ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DBConsumer.java?rev=1555281&view=auto
==============================================================================
--- ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DBConsumer.java (added)
+++ ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DBConsumer.java Fri Jan  3 23:22:58 2014
@@ -0,0 +1,126 @@
+package org.apache.ctakes.ytex.uima.annotators;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ctakes.ytex.uima.ApplicationContextHolder;
+import org.apache.ctakes.ytex.uima.mapper.DocumentMapperService;
+import org.apache.uima.UimaContext;
+import org.apache.uima.analysis_component.JCasAnnotator_ImplBase;
+import org.apache.uima.cas.impl.XmiCasSerializer;
+import org.apache.uima.jcas.JCas;
+import org.apache.uima.resource.ResourceInitializationException;
+import org.apache.uima.util.XMLSerializer;
+import org.xml.sax.SAXException;
+
+/**
+ * Store the document text, cas, and annotations in the database. Delegates to
+ * DocumentMapperService. This is an annotator and not a consumer because
+ * according to the uima docs the Consumer interface is deprecated. Config
+ * parameters:
+ * <ul>
+ * <li>xmiOutputDirectory - String - directory where the xmi serialized cas
+ * should be stored. Leave empty if you don't want to store the xmi. Defaults to
+ * empty.
+ * <li>analysisBatch - String - Document group/analysis batch, stored in
+ * document.analysis_batch. Defaults to current date/time.
+ * <li>storeDocText - boolean - should the document text be stored in the DB?
+ * defaults to true
+ * <li>storeCAS - boolean - should the serialized xmi cas be stored in the DB?
+ * defaults to true
+ * <li>typesToIngore - multivalued String - uima types not to be saved.
+ * <li>insertAnnotationContainmentLinks - boolean - should we store containment
+ * links? defaults to true
+ * </ul>
+ * 
+ * @author vijay
+ * 
+ */
+public class DBConsumer extends JCasAnnotator_ImplBase {
+	private static final Log log = LogFactory.getLog(DBConsumer.class);
+	private DocumentMapperService documentMapperService;
+	private String xmiOutputDirectory;
+	private String analysisBatch;
+	private boolean bStoreDocText;
+	private boolean bStoreCAS;
+	private boolean bInsertAnnotationContainmentLinks;
+	private Set<String> setTypesToIgnore = new HashSet<String>();
+
+	/**
+	 * read config parameters
+	 */
+	@Override
+	public void initialize(UimaContext aContext)
+			throws ResourceInitializationException {
+		super.initialize(aContext);
+		xmiOutputDirectory = (String) aContext
+				.getConfigParameterValue("xmiOutputDirectory");
+		analysisBatch = (String) aContext
+				.getConfigParameterValue("analysisBatch");
+		Boolean boolStoreDocText = (Boolean) aContext
+				.getConfigParameterValue("storeDocText");
+		Boolean boolStoreCAS = (Boolean) aContext
+				.getConfigParameterValue("storeCAS");
+		Boolean boolInsertAnnotationContainmentLinks = (Boolean) aContext
+				.getConfigParameterValue("insertAnnotationContainmentLinks");
+		String typesToIgnore[] = (String[]) aContext
+				.getConfigParameterValue("typesToIgnore");
+		if (typesToIgnore != null)
+			setTypesToIgnore.addAll(Arrays.asList(typesToIgnore));
+		bStoreDocText = boolStoreDocText == null ? true : boolStoreDocText
+				.booleanValue();
+		bStoreCAS = boolStoreCAS == null ? true : boolStoreCAS.booleanValue();
+		bInsertAnnotationContainmentLinks = boolInsertAnnotationContainmentLinks == null ? true
+				: boolInsertAnnotationContainmentLinks.booleanValue();
+		documentMapperService = (DocumentMapperService) ApplicationContextHolder
+				.getApplicationContext().getBean("documentMapperService");
+	}
+
+	/**
+	 * call the documentMapperService to save the document. if the
+	 * xmiOutputDirectory is defined, write the document to an xmi file. use the
+	 * name corresponding to the documentID.
+	 */
+	@Override
+	public void process(JCas jcas) {
+		Integer documentID = documentMapperService.saveDocument(jcas,
+				analysisBatch, bStoreDocText, bStoreCAS, bInsertAnnotationContainmentLinks, setTypesToIgnore);
+		if (documentID != null && xmiOutputDirectory != null
+				&& xmiOutputDirectory.length() > 0) {
+			File dirOut = new File(xmiOutputDirectory);
+			if (!dirOut.exists() && !dirOut.isDirectory()) {
+				log.error(xmiOutputDirectory + " does not exist");
+			} else {
+				BufferedWriter writer = null;
+				try {
+					writer = new BufferedWriter(new FileWriter(
+							xmiOutputDirectory + File.separatorChar
+									+ documentID.toString() + ".xmi"));
+					XmiCasSerializer ser = new XmiCasSerializer(
+							jcas.getTypeSystem());
+					XMLSerializer xmlSer = new XMLSerializer(writer, false);
+					ser.serialize(jcas.getCas(), xmlSer.getContentHandler());
+				} catch (IOException e) {
+					log.error("error writing xmi, documentID=" + documentID, e);
+				} catch (SAXException e) {
+					log.error("error writing xmi, documentID=" + documentID, e);
+				} finally {
+					if (writer != null) {
+						try {
+							writer.close();
+						} catch (IOException e) {
+						}
+					}
+				}
+			}
+		}
+	}
+
+}

Added: ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DateAnnotator.java
URL: http://svn.apache.org/viewvc/ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DateAnnotator.java?rev=1555281&view=auto
==============================================================================
--- ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DateAnnotator.java (added)
+++ ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/DateAnnotator.java Fri Jan  3 23:22:58 2014
@@ -0,0 +1,81 @@
+package org.apache.ctakes.ytex.uima.annotators;
+
+import java.text.SimpleDateFormat;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ctakes.ytex.uima.types.Date;
+import org.apache.uima.UimaContext;
+import org.apache.uima.analysis_component.JCasAnnotator_ImplBase;
+import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
+import org.apache.uima.cas.FSIterator;
+import org.apache.uima.cas.Type;
+import org.apache.uima.cas.text.AnnotationIndex;
+import org.apache.uima.jcas.JCas;
+import org.apache.uima.jcas.tcas.Annotation;
+import org.apache.uima.resource.ResourceInitializationException;
+
+
+
+import com.mdimension.jchronic.Chronic;
+import com.mdimension.jchronic.utils.Span;
+
+/**
+ * The cTAKES date doesn't actually parse the date. Parse the date with Chronic,
+ * store a new annotation with the real date. Takes as initialization parameter
+ * a type name; defaults to "edu.mayo.bmi.uima.cdt.ae.type.DateAnnotation"
+ * Iterate through all annotations of this type, and use chronic to parse the
+ * covered text.
+ */
+public class DateAnnotator extends JCasAnnotator_ImplBase {
+	private static final Log log = LogFactory.getLog(DateAnnotator.class);
+	public static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
+
+	String dateType;
+
+	private ThreadLocal<SimpleDateFormat> tlDateFormat = new ThreadLocal<SimpleDateFormat>() {
+		@Override
+		protected SimpleDateFormat initialValue() {
+			return new SimpleDateFormat(DATE_FORMAT);
+		}
+	};
+
+	@Override
+	public void initialize(UimaContext aContext)
+			throws ResourceInitializationException {
+		super.initialize(aContext);
+		dateType = (String) aContext.getConfigParameterValue("dateType");
+		if (dateType == null) {
+			dateType = "edu.mayo.bmi.uima.core.type.textsem.DateAnnotation";
+		}
+	}
+
+	@Override
+	public void process(JCas jCas) throws AnalysisEngineProcessException {
+		Type t = jCas.getTypeSystem().getType(dateType);
+		if (t != null) {
+			AnnotationIndex<Annotation> annoIndex = jCas.getAnnotationIndex();
+			FSIterator<Annotation> iter = annoIndex.iterator();
+			while (iter.hasNext()) {
+				Annotation anno = iter.next();
+				try {
+					Span span = Chronic.parse(anno.getCoveredText());
+					if (span != null && span.getBeginCalendar() != null) {
+						Date date = new Date(jCas);
+						date.setBegin(anno.getBegin());
+						date.setEnd(anno.getEnd());
+						date.setDate(tlDateFormat.get().format(
+								span.getBeginCalendar().getTime()));
+						date.addToIndexes();
+					}
+				} catch (Exception e) {
+					if (log.isDebugEnabled())
+						log.debug(
+								"chronic failed on: " + anno.getCoveredText(),
+								e);
+				}
+			}
+		}
+	}
+
+}

Added: ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/MetaMapToCTakesAnnotator.java
URL: http://svn.apache.org/viewvc/ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/MetaMapToCTakesAnnotator.java?rev=1555281&view=auto
==============================================================================
--- ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/MetaMapToCTakesAnnotator.java (added)
+++ ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/MetaMapToCTakesAnnotator.java Fri Jan  3 23:22:58 2014
@@ -0,0 +1,272 @@
+package org.apache.ctakes.ytex.uima.annotators;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ctakes.typesystem.type.constants.CONST;
+import org.apache.ctakes.typesystem.type.refsem.OntologyConcept;
+import org.apache.ctakes.typesystem.type.textsem.EntityMention;
+import org.apache.ctakes.typesystem.type.textsem.IdentifiedAnnotation;
+import org.apache.ctakes.typesystem.type.textsem.MedicationEventMention;
+import org.apache.uima.UimaContext;
+import org.apache.uima.analysis_component.JCasAnnotator_ImplBase;
+import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
+import org.apache.uima.cas.FSIterator;
+import org.apache.uima.cas.Feature;
+import org.apache.uima.cas.Type;
+import org.apache.uima.jcas.JCas;
+import org.apache.uima.jcas.cas.FSArray;
+import org.apache.uima.jcas.cas.StringArray;
+import org.apache.uima.jcas.tcas.Annotation;
+import org.apache.uima.resource.ResourceInitializationException;
+
+/**
+ * Convert metamap concepts to ctakes named entities.
+ * <p/>
+ * Create MedicationEventMention/EntityMention annotations for each set of
+ * CandidateConcept annotations that span the same text.
+ * <p/>
+ * if checkMedications is set to true, see if a named entity has a medication
+ * semantic type. if so, create a MedicationEventMention. else create
+ * EntityMention.
+ * 
+ * @author vijay
+ * 
+ */
+public class MetaMapToCTakesAnnotator extends JCasAnnotator_ImplBase {
+	private static final Log log = LogFactory
+			.getLog(MetaMapToCTakesAnnotator.class);
+	private boolean checkMedications = false;
+
+	private static final String[] medicationAbrs = { "aapp", "antb", "bacs",
+			"bodm", "carb", "chem", "chvf", "chvs", "clnd", "eico", "elii",
+			"enzy", "hops", "horm", "imft", "inch", "irda", "lipd", "nnon",
+			"nsba", "opco", "orch", "phsu", "rcpt", "strd", "vita" };
+
+	private static Set<String> setMedicationAbrs;
+	static {
+		setMedicationAbrs = new HashSet<String>(Arrays.asList(medicationAbrs));
+	}
+
+	public static class NegSpan {
+		int begin;
+
+		public int getBegin() {
+			return begin;
+		}
+
+		public void setBegin(int begin) {
+			this.begin = begin;
+		}
+
+		public int getEnd() {
+			return end;
+		}
+
+		public void setEnd(int end) {
+			this.end = end;
+		}
+
+		@Override
+		public int hashCode() {
+			final int prime = 31;
+			int result = 1;
+			result = prime * result + begin;
+			result = prime * result + end;
+			return result;
+		}
+
+		@Override
+		public boolean equals(Object obj) {
+			if (this == obj)
+				return true;
+			if (obj == null)
+				return false;
+			if (getClass() != obj.getClass())
+				return false;
+			NegSpan other = (NegSpan) obj;
+			if (begin != other.begin)
+				return false;
+			if (end != other.end)
+				return false;
+			return true;
+		}
+
+		public NegSpan(int begin, int end) {
+			super();
+			this.begin = begin;
+			this.end = end;
+		}
+
+		public NegSpan(Annotation anno) {
+			super();
+			this.begin = anno.getBegin();
+			this.end = anno.getEnd();
+		}
+
+		int end;
+	}
+
+	/**
+	 * get all negated spans
+	 * 
+	 * @param jcas
+	 * @return
+	 */
+	private Set<NegSpan> getNegatedSpans(JCas jcas) {
+		Set<NegSpan> negSet = new HashSet<NegSpan>();
+		// get the Metamap type
+		Type negType = jcas.getTypeSystem().getType(
+				"org.metamap.uima.ts.Negation");
+		// abort if the type is not found
+		if (negType == null) {
+			log.debug("no negated concepts");
+		} else {
+			Feature spanFeature = negType.getFeatureByBaseName("ncSpans");
+			if (spanFeature == null) {
+				log.warn("no ncSpans feature!");
+			} else {
+				FSIterator<Annotation> negIter = jcas.getAnnotationIndex(
+						negType).iterator();
+				while (negIter.hasNext()) {
+					Annotation negAnno = negIter.next();
+					FSArray spanArr = (FSArray) negAnno
+							.getFeatureValue(spanFeature);
+					if (spanArr != null) {
+						for (int i = 0; i < spanArr.size(); i++) {
+							negSet.add(new NegSpan((Annotation) spanArr.get(i)));
+						}
+					}
+				}
+			}
+		}
+		return negSet;
+	}
+
+	@Override
+	public void process(JCas jcas) throws AnalysisEngineProcessException {
+		// get the negated spans
+		Set<NegSpan> negSet = getNegatedSpans(jcas);
+		// get the Metamap type
+		Type candidateType = jcas.getTypeSystem().getType(
+				"org.metamap.uima.ts.Candidate");
+		// abort if the type is not found
+		if (candidateType == null) {
+			log.debug("no candidate concepts");
+			return;
+		}
+		// get the cui feature
+		Feature cuiFeature = candidateType.getFeatureByBaseName("cui");
+		if (cuiFeature == null) {
+			log.warn("no cui feature!");
+			return;
+		}
+		Feature tuiFeature = candidateType
+				.getFeatureByBaseName("semanticTypes");
+		if (tuiFeature == null) {
+			log.warn("no semanticTypes feature!");
+			return;
+		}
+
+		// iterate through candidates
+		FSIterator<Annotation> candidateIter = jcas.getAnnotationIndex(
+				candidateType).iterator();
+		// span we are working on in loop
+		int begin = -1;
+		int end = -1;
+		// concepts for a given span
+		Map<String, OntologyConcept> concepts = new HashMap<String, OntologyConcept>();
+		// is one of the concepts for the span a medication?
+		boolean bMedication = false;
+		while (candidateIter.hasNext()) {
+			Annotation annoCandidate = candidateIter.next();
+			if (begin >= 0 && begin == annoCandidate.getBegin()
+					&& end == annoCandidate.getEnd()) {
+				// this candidate spans the same text as the last named entity
+				// add it as one of the concepts
+				bMedication = addConcept(jcas, concepts, annoCandidate,
+						cuiFeature, tuiFeature, bMedication);
+
+			} else {
+				// moving on to a new named entity, finalize the old one
+				addNamedEntity(jcas, begin, end, concepts, bMedication, negSet);
+				// reset span
+				begin = annoCandidate.getBegin();
+				end = annoCandidate.getEnd();
+				bMedication = addConcept(jcas, concepts, annoCandidate,
+						cuiFeature, tuiFeature, bMedication);
+			}
+		}
+		addNamedEntity(jcas, begin, end, concepts, bMedication, negSet);
+	}
+
+	private void addNamedEntity(JCas jcas, int begin, int end,
+			Map<String, OntologyConcept> concepts, boolean bMedication,
+			Set<NegSpan> negSet) {
+		if (concepts.isEmpty())
+			return;
+		IdentifiedAnnotation neLast = bMedication ? new MedicationEventMention(
+				jcas) : new EntityMention(jcas);
+		neLast.setPolarity(negSet.contains(new NegSpan(begin, end)) ? CONST.NE_POLARITY_NEGATION_PRESENT
+				: CONST.NE_POLARITY_NEGATION_ABSENT);
+		neLast.setBegin(begin);
+		neLast.setEnd(end);
+		FSArray ocArr = new FSArray(jcas, concepts.size());
+		int ocArrIdx = 0;
+		for (OntologyConcept oc : concepts.values()) {
+			// set the cui field if this is in fact a cui
+			ocArr.set(ocArrIdx, oc);
+			ocArrIdx++;
+		}
+		neLast.setOntologyConceptArr(ocArr);
+		concepts.clear();
+		neLast.addToIndexes();
+	}
+
+	/**
+	 * add a concept to the map of concepts for the current named entity.
+	 * 
+	 * @param jcas
+	 * @param concepts
+	 * @param annoCandidate
+	 * @param cuiFeature
+	 * @param tuiFeature
+	 * @param bMedication
+	 * @return is this concept a medication concept? only check if
+	 *         checkMedications is true
+	 */
+	private boolean addConcept(JCas jcas,
+			Map<String, OntologyConcept> concepts, Annotation annoCandidate,
+			Feature cuiFeature, Feature tuiFeature, boolean bMedication) {
+		String cui = annoCandidate.getStringValue(cuiFeature);
+		if (concepts.containsKey(cui))
+			return bMedication;
+		OntologyConcept oc = new OntologyConcept(jcas);
+		oc.setCode(cui);
+		oc.setCodingScheme("METAMAP");
+		StringArray tuiArr = (StringArray) annoCandidate
+				.getFeatureValue(tuiFeature);
+		List<String> tuis = null;
+		if (tuiArr != null)
+			tuis = Arrays.asList(tuiArr.toStringArray());
+		concepts.put(cui, oc);
+		return checkMedications && tuis != null ? !Collections.disjoint(
+				setMedicationAbrs, tuis) : false;
+	}
+
+	@Override
+	public void initialize(UimaContext aContext)
+			throws ResourceInitializationException {
+		super.initialize(aContext);
+		checkMedications = (Boolean) aContext
+				.getConfigParameterValue("checkMedications");
+	}
+
+}

Added: ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NamedEntityRegexAnnotator.java
URL: http://svn.apache.org/viewvc/ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NamedEntityRegexAnnotator.java?rev=1555281&view=auto
==============================================================================
--- ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NamedEntityRegexAnnotator.java (added)
+++ ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NamedEntityRegexAnnotator.java Fri Jan  3 23:22:58 2014
@@ -0,0 +1,121 @@
+package org.apache.ctakes.ytex.uima.annotators;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ctakes.typesystem.type.refsem.OntologyConcept;
+import org.apache.ctakes.typesystem.type.textsem.EntityMention;
+import org.apache.ctakes.typesystem.type.textspan.Segment;
+import org.apache.ctakes.ytex.uima.ApplicationContextHolder;
+import org.apache.ctakes.ytex.uima.dao.NamedEntityRegexDao;
+import org.apache.ctakes.ytex.uima.model.NamedEntityRegex;
+import org.apache.uima.UimaContext;
+import org.apache.uima.analysis_component.JCasAnnotator_ImplBase;
+import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
+import org.apache.uima.cas.FSIterator;
+import org.apache.uima.cas.text.AnnotationIndex;
+import org.apache.uima.jcas.JCas;
+import org.apache.uima.jcas.cas.FSArray;
+import org.apache.uima.jcas.tcas.Annotation;
+import org.apache.uima.resource.ResourceInitializationException;
+
+/**
+ * Create NamedEntity annotations. Use regex to identify the Named Entities.
+ * Read the named entity regex - concept id map from the db.
+ * 
+ * @author vijay
+ * 
+ */
+public class NamedEntityRegexAnnotator extends JCasAnnotator_ImplBase {
+	private static final Log log = LogFactory
+			.getLog(NamedEntityRegexAnnotator.class);
+
+	private NamedEntityRegexDao neRegexDao;
+	private Map<NamedEntityRegex, Pattern> regexMap = new HashMap<NamedEntityRegex, Pattern>();
+
+	// private Integer getTypeIdForClassName(String strClassName) {
+	// try {
+	// Class<?> clazz = Class.forName(strClassName);
+	// Field field = clazz.getDeclaredField("typeIndexID");
+	// return field.getInt(clazz);
+	// } catch (Exception e) {
+	// log.error("config error, could not get type id for class: "
+	// + strClassName, e);
+	// return null;
+	// }
+	// }
+
+	public void initialize(UimaContext aContext)
+			throws ResourceInitializationException {
+		neRegexDao = (NamedEntityRegexDao) ApplicationContextHolder
+				.getApplicationContext().getBean("namedEntityRegexDao");
+		List<NamedEntityRegex> regexList = neRegexDao.getNamedEntityRegexs();
+		initRegexMap(regexList);
+	}
+
+	protected void initRegexMap(List<NamedEntityRegex> regexList) {
+		for (NamedEntityRegex regex : regexList) {
+			if (log.isDebugEnabled())
+				log.debug(regex);
+			Pattern pat = Pattern.compile(regex.getRegex());
+			regexMap.put(regex, pat);
+		}
+	}
+
+	@Override
+	public void process(JCas aJCas) throws AnalysisEngineProcessException {
+		for (Map.Entry<NamedEntityRegex, Pattern> entry : regexMap.entrySet()) {
+			if (entry.getKey().getContext() != null) {
+				AnnotationIndex<Annotation> idx = aJCas
+						.getAnnotationIndex(Segment.typeIndexID);
+				FSIterator<Annotation> iter = idx.iterator();
+				while (iter.hasNext()) {
+					Segment segment = (Segment) iter.next();
+					if (entry.getKey().getContext().equals(segment.getId())) {
+						processRegex(aJCas, segment, entry.getKey(),
+								entry.getValue());
+					}
+				}
+			} else {
+				// no context specified - search entire document
+				processRegex(aJCas, null, entry.getKey(), entry.getValue());
+			}
+		}
+	}
+
+	/**
+	 * Search the document / annotation span for with the supplied pattern. If
+	 * we get a hit, create a named entity annotation.
+	 * 
+	 * @param aJCas
+	 * @param anno
+	 * @param neRegex
+	 * @param pattern
+	 */
+	private void processRegex(JCas aJCas, Annotation anno,
+			NamedEntityRegex neRegex, Pattern pattern) {
+		String docText = aJCas.getDocumentText();
+		String annoText = anno != null ? docText.substring(anno.getBegin(),
+				anno.getEnd()) : docText;
+		int nOffset = anno != null ? anno.getBegin() : 0;
+		Matcher matcher = pattern.matcher(annoText);
+		while (matcher.find()) {
+			EntityMention ne = new EntityMention(aJCas);
+			ne.setBegin(nOffset + matcher.start());
+			ne.setEnd(nOffset + matcher.end());
+			FSArray ocArr = new FSArray(aJCas, 1);
+			OntologyConcept oc = new OntologyConcept(aJCas);
+			oc.setCode(neRegex.getCode());
+			oc.setCodingScheme(neRegex.getCodingScheme());
+			oc.setOid(neRegex.getOid());
+			ocArr.set(0, oc);
+			ne.setOntologyConceptArr(ocArr);
+			ne.addToIndexes();
+		}
+	}
+}

Added: ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NegexAnnotator.java
URL: http://svn.apache.org/viewvc/ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NegexAnnotator.java?rev=1555281&view=auto
==============================================================================
--- ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NegexAnnotator.java (added)
+++ ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/NegexAnnotator.java Fri Jan  3 23:22:58 2014
@@ -0,0 +1,829 @@
+package org.apache.ctakes.ytex.uima.annotators;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.lang.reflect.InvocationTargetException;
+import java.nio.CharBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.beanutils.BeanUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ctakes.typesystem.type.textsem.ContextAnnotation;
+import org.apache.ctakes.typesystem.type.textsem.IdentifiedAnnotation;
+import org.apache.ctakes.typesystem.type.textspan.Sentence;
+import org.apache.uima.UimaContext;
+import org.apache.uima.analysis_component.JCasAnnotator_ImplBase;
+import org.apache.uima.cas.FSIterator;
+import org.apache.uima.cas.text.AnnotationIndex;
+import org.apache.uima.jcas.JCas;
+import org.apache.uima.jcas.tcas.Annotation;
+import org.apache.uima.resource.ResourceInitializationException;
+
+
+/**
+ * Negex adapted to cTAKES. Checks negation status of named entities. Loads
+ * negex triggers from classpath:
+ * <tt>/org/apache/ctakes/ytex/uima/annotators/negex_triggers.txt</tt>
+ * <p/>
+ * The meaning of the certainty and confidence attributes is nowhere documented
+ * for cTakes. There are several ways of handling 'maybes', see below. Default
+ * behavior: certainty attribute for negated & possible terms set to -1
+ * Configure with following uima initialization parameters:
+ * <li>checkPossibilities : should we check for possibilities
+ * <li>negatePossibilities : should possibilities be negated, default = true? if
+ * true,
+ * <ul>
+ * <li>negated: polarity=-1, confidence=1
+ * <li>possible: polarity=-1, confidence=-1
+ * <li>affirmed: polarity=1, confidence=1
+ * </ul>
+ * if false
+ * <ul>
+ * <li>negated: polarity=-1, confidence=1
+ * <li>possible: polarity=1, confidence=-1
+ * <li>affirmed: polarity=1, confidence=1
+ * </ul>
+ * <li>storeAsInterval
+ * <ul>
+ * <li>negated: polarity=-1, confidence = -1
+ * <li>possible: polarity=1, confidence = 0.5
+ * <li>affirmed: polarity=1, confidence = 1
+ * </ul>
+ * 
+ * Added support for negating arbitrary annotations. Set the targetTypeName to
+ * an annotation type. Will see if it is negated; if so will set the negated and
+ * possible boolean values on the annotation.
+ * 
+ * @author vijay
+ * 
+ */
+public class NegexAnnotator extends JCasAnnotator_ImplBase {
+	private static final Log log = LogFactory.getLog(NegexAnnotator.class);
+	private List<NegexRule> listNegexRules = null;
+	private boolean negatePossibilities = true;
+	private boolean checkPossibilities = true;
+	private boolean storeAsInterval = false;
+	private String targetTypeName = null;
+
+	/**
+	 * Load the negex trigger rules from the classpath and read the uima
+	 * configuration parameters documented on the class.
+	 */
+	@Override
+	public void initialize(UimaContext aContext)
+			throws ResourceInitializationException {
+		super.initialize(aContext);
+		this.listNegexRules = this.initializeRules();
+		negatePossibilities = getBooleanConfigParam(aContext,
+				"negatePossibilities", negatePossibilities);
+		// negating possibilities implies checking them, so the
+		// checkPossibilities parameter is only consulted when
+		// negatePossibilities is false
+		if (negatePossibilities) {
+			checkPossibilities = true;
+		} else {
+			checkPossibilities = getBooleanConfigParam(aContext,
+					"checkPossibilities", checkPossibilities);
+		}
+		storeAsInterval = getBooleanConfigParam(aContext, "storeAsInterval",
+				storeAsInterval);
+		// optional: name of an additional annotation type to negate
+		targetTypeName = (String) aContext
+				.getConfigParameterValue("targetTypeName");
+	}
+
+	/**
+	 * Read a boolean uima configuration parameter.
+	 * 
+	 * @param aContext
+	 *            uima context to read from
+	 * @param param
+	 *            parameter name
+	 * @param defaultVal
+	 *            value to return when the parameter is not set
+	 * @return the configured value, or defaultVal if unset
+	 */
+	private boolean getBooleanConfigParam(UimaContext aContext, String param,
+			boolean defaultVal) {
+		Boolean paramValue = (Boolean) aContext.getConfigParameterValue(param);
+		return paramValue == null ? defaultVal : paramValue;
+
+	}
+
+	/**
+	 * Read the negex trigger phrases from
+	 * <tt>/org/apache/ctakes/ytex/uima/annotators/negex_triggers.txt</tt> on
+	 * the classpath, one rule per line.
+	 * <p>
+	 * NOTE(review): getResourceAsStream returns null if the resource is
+	 * missing, which would NPE here - confirm the trigger file is always
+	 * packaged. The reader also uses the platform default charset.
+	 * 
+	 * @return rules sorted by trimmed length, longest first, so longer
+	 *         triggers get a chance to match before shorter overlapping ones
+	 */
+	private List<String> initalizeRuleList() {
+		List<String> rules = new ArrayList<String>();
+		BufferedReader reader = null;
+		try {
+			reader = new BufferedReader(new InputStreamReader(this.getClass()
+					.getResourceAsStream(
+							"/org/apache/ctakes/ytex/uima/annotators/negex_triggers.txt")));
+			String line = null;
+			try {
+				while ((line = reader.readLine()) != null)
+					rules.add(line);
+			} catch (IOException e) {
+				log.error("oops", e);
+			}
+			// sort descending by trimmed length
+			Collections.sort(rules, new Comparator<String>() {
+
+				@Override
+				public int compare(String o1, String o2) {
+					int l1 = o1.trim().length();
+					int l2 = o2.trim().length();
+					if (l1 < l2)
+						return 1;
+					else if (l1 > l2)
+						return -1;
+					else
+						return 0;
+				}
+
+			});
+		} finally {
+			try {
+				if (reader != null)
+					reader.close();
+			} catch (IOException e) {
+				log.error("oops", e);
+			}
+		}
+		return rules;
+	}
+
+	/**
+	 * Parse each trigger line (phrase TAB tag) into a NegexRule with a
+	 * compiled pattern. Phrases are rewritten so that internal whitespace
+	 * matches \s+ and the whole trigger is anchored between
+	 * punctuation/whitespace boundaries.
+	 * 
+	 * @return list of compiled rules, in the (longest-first) order produced
+	 *         by initalizeRuleList
+	 */
+	private List<NegexRule> initializeRules() {
+		List<String> listRules = this.initalizeRuleList();
+		List<NegexRule> listNegexRules = new ArrayList<NegexRule>(
+				listRules.size());
+		Iterator<String> iRule = listRules.iterator();
+		while (iRule.hasNext()) {
+			String rule = iRule.next();
+			// each line is: trigger phrase, tab(s), tag
+			Pattern p = Pattern.compile("[\\t]+"); // Working.
+			String[] ruleTokens = p.split(rule.trim());
+			if (ruleTokens.length == 2) {
+				// Add the regular expression characters to tokens and
+				// assemble the rule again.
+				String[] ruleMembers = ruleTokens[0].trim().split(" ");
+				String rule2 = "";
+				for (int i = 0; i <= ruleMembers.length - 1; i++) {
+					if (!ruleMembers[i].equals("")) {
+						if (ruleMembers.length == 1) {
+							rule2 = ruleMembers[i];
+						} else {
+							rule2 = rule2 + ruleMembers[i].trim() + "\\s+";
+						}
+					}
+				}
+				// Remove the last s+
+				if (rule2.endsWith("\\s+")) {
+					rule2 = rule2.substring(0, rule2.lastIndexOf("\\s+"));
+				}
+
+				// require punctuation or whitespace on both sides of the
+				// trigger (but not '_', which is used to black out text)
+				String rule3 = "(?m)(?i)[[\\p{Punct}&&[^\\]\\[]]|\\s+]("
+						+ rule2 + ")[[\\p{Punct}&&[^_]]|\\s+]";
+
+				Pattern p2 = Pattern.compile(rule3.trim());
+				listNegexRules.add(new NegexRule(p2, rule2, ruleTokens[1]
+						.trim()));
+			} else {
+				log.warn("could not parse rule:" + rule);
+			}
+			// Matcher m = p2.matcher(sentence);
+			//
+			// while (m.find() == true) {
+			// sentence = m.replaceAll(" " + ruleTokens[1].trim()
+			// + m.group().trim().replaceAll(" ", filler)
+			// + ruleTokens[1].trim() + " ");
+			// }
+		}
+		return listNegexRules;
+
+	}
+
+	/**
+	 * Callback deciding whether a candidate annotation should have its
+	 * negation status checked.
+	 */
+	public static interface TargetAnnoFilter {
+		public boolean filter(Annotation anno);
+	}
+
+	/**
+	 * only bother with IdentifiedAnnotations that have concepts
+	 * 
+	 * @author vijay
+	 * 
+	 */
+	public static class NamedEntityTargetAnnoFilter implements TargetAnnoFilter {
+
+		@Override
+		public boolean filter(Annotation anno) {
+			// accept only IdentifiedAnnotations with a non-empty concept array
+			if (!(anno instanceof IdentifiedAnnotation))
+				return false;
+			IdentifiedAnnotation ia = (IdentifiedAnnotation) anno;
+			return ia.getOntologyConceptArr() != null
+					&& ia.getOntologyConceptArr().size() > 0;
+		}
+
+	}
+
+	/**
+	 * Check the negation status of all IdentifiedAnnotations that carry
+	 * concepts, sentence by sentence; additionally, if targetTypeName is
+	 * configured, check annotations of that type (unfiltered).
+	 */
+	@Override
+	public void process(JCas aJCas) {
+		AnnotationIndex sentenceIdx = aJCas
+				.getAnnotationIndex(Sentence.typeIndexID);
+		AnnotationIndex neIdx = aJCas
+				.getAnnotationIndex(IdentifiedAnnotation.typeIndexID);
+		negateAnnotations(aJCas, sentenceIdx, neIdx,
+				new NamedEntityTargetAnnoFilter());
+		if (targetTypeName != null) {
+			try {
+				negateAnnotations(
+						aJCas,
+						sentenceIdx,
+						aJCas.getAnnotationIndex(aJCas.getTypeSystem().getType(
+								targetTypeName)), null);
+			} catch (Exception e) {
+				// e.g. targetTypeName is not a known type in this type system
+				log.error("error getting typeSystemId for " + targetTypeName, e);
+			}
+		}
+	}
+
+	/**
+	 * Iterate over sentences; for each sentence, check negation of the target
+	 * annotations it covers.
+	 * 
+	 * @param aJCas
+	 *            cas
+	 * @param sentenceIdx
+	 *            index of Sentence annotations
+	 * @param targetIdx
+	 *            index of candidate annotations to negate
+	 * @param filter
+	 *            optional; when non-null only annotations accepted by the
+	 *            filter are checked
+	 */
+	private void negateAnnotations(JCas aJCas, AnnotationIndex sentenceIdx,
+			AnnotationIndex targetIdx, TargetAnnoFilter filter) {
+		FSIterator sentenceIter = sentenceIdx.iterator();
+		while (sentenceIter.hasNext()) {
+			Sentence s = (Sentence) sentenceIter.next();
+			// subiterator: only annotations covered by this sentence
+			FSIterator neIter = targetIdx.subiterator(s);
+			while (neIter.hasNext()) {
+				Annotation ne = (Annotation) neIter.next();
+				if (filter == null || filter.filter(ne))
+					checkNegation(aJCas, s, ne);
+				// checkNegation2(aJCas, s, ne);
+			}
+		}
+	}
+
+	/**
+	 * A single negex trigger: the compiled pattern, the normalized trigger
+	 * phrase (rule), and its tag, e.g. "[PREN]". equals/hashCode are based on
+	 * rule and tag only (not the pattern).
+	 */
+	public static class NegexRule {
+
+		@Override
+		public String toString() {
+			return "NegexRule [rule=" + rule + ", tag=" + tag + "]";
+		}
+
+		@Override
+		public int hashCode() {
+			final int prime = 31;
+			int result = 1;
+			result = prime * result + ((rule == null) ? 0 : rule.hashCode());
+			result = prime * result + ((tag == null) ? 0 : tag.hashCode());
+			return result;
+		}
+
+		@Override
+		public boolean equals(Object obj) {
+			if (this == obj)
+				return true;
+			if (obj == null)
+				return false;
+			if (getClass() != obj.getClass())
+				return false;
+			NegexRule other = (NegexRule) obj;
+			if (rule == null) {
+				if (other.rule != null)
+					return false;
+			} else if (!rule.equals(other.rule))
+				return false;
+			if (tag == null) {
+				if (other.tag != null)
+					return false;
+			} else if (!tag.equals(other.tag))
+				return false;
+			return true;
+		}
+
+		// compiled boundary-anchored form of the trigger
+		private Pattern pattern;
+		// negex tag, e.g. [PREN], [POST], [CONJ]
+		private String tag;
+		// normalized trigger phrase (whitespace replaced by \s+)
+		private String rule;
+
+		public Pattern getPattern() {
+			return pattern;
+		}
+
+		public void setPattern(Pattern pattern) {
+			this.pattern = pattern;
+		}
+
+		public String getTag() {
+			return tag;
+		}
+
+		public void setTag(String tag) {
+			this.tag = tag;
+		}
+
+		public String getRule() {
+			return rule;
+		}
+
+		public void setRule(String rule) {
+			this.rule = rule;
+		}
+
+		public NegexRule() {
+			super();
+		}
+
+		public NegexRule(Pattern pattern, String rule, String tag) {
+			super();
+			this.pattern = pattern;
+			this.tag = tag;
+			this.rule = rule;
+		}
+	}
+
+	/**
+	 * A trigger match within a sentence: character span [start, end) plus the
+	 * NegexRule that matched. Natural ordering is by start offset; note this
+	 * ordering is not consistent with equals (equals also compares end and
+	 * rule).
+	 */
+	public static class NegexToken implements Comparable<NegexToken> {
+		private int start;
+		private int end;
+		private NegexRule rule;
+
+		@Override
+		public String toString() {
+			return "NegexToken [start=" + start + ", end=" + end + ", rule="
+					+ rule + "]";
+		}
+
+		@Override
+		public int hashCode() {
+			final int prime = 31;
+			int result = 1;
+			result = prime * result + end;
+			result = prime * result + ((rule == null) ? 0 : rule.hashCode());
+			result = prime * result + start;
+			return result;
+		}
+
+		@Override
+		public boolean equals(Object obj) {
+			if (this == obj)
+				return true;
+			if (obj == null)
+				return false;
+			if (getClass() != obj.getClass())
+				return false;
+			NegexToken other = (NegexToken) obj;
+			if (end != other.end)
+				return false;
+			if (rule == null) {
+				if (other.rule != null)
+					return false;
+			} else if (!rule.equals(other.rule))
+				return false;
+			if (start != other.start)
+				return false;
+			return true;
+		}
+
+		public NegexToken(int start, int end, NegexRule rule) {
+			super();
+			this.start = start;
+			this.end = end;
+			this.rule = rule;
+		}
+
+		@Override
+		public int compareTo(NegexToken o) {
+			// compare the primitives directly instead of boxing via
+			// new Integer(); the branch form is also overflow-safe
+			return this.start < o.start ? -1 : this.start > o.start ? 1 : 0;
+		}
+
+		public int getStart() {
+			return start;
+		}
+
+		public void setStart(int start) {
+			this.start = start;
+		}
+
+		public int getEnd() {
+			return end;
+		}
+
+		public void setEnd(int end) {
+			this.end = end;
+		}
+
+		/** tag of the rule that matched, e.g. "[PREN]" */
+		public String getTag() {
+			return rule.getTag();
+		}
+
+	}
+
+	/**
+	 * Scan the per-character token array outward from the named entity for a
+	 * trigger with the given tag, giving up early if any of the stop tags is
+	 * seen first.
+	 * 
+	 * @param tag
+	 *            tag to look for, e.g. "[PREN]"
+	 * @param stopTags
+	 *            tags that terminate the search
+	 * @param before
+	 *            true: scan backwards from the ne start; false: scan forward
+	 *            from the ne end
+	 * @param neRelStart
+	 *            ne start offset relative to the padded sentence
+	 * @param neRelEnd
+	 *            ne end offset relative to the padded sentence
+	 * @param tokens
+	 *            per-character map of matched triggers (null = no trigger)
+	 * @return the matching token, or null if none found before a stop tag
+	 */
+	private NegexToken findTokenByTag(String tag, String stopTags[],
+			boolean before, int neRelStart, int neRelEnd, NegexToken tokens[]) {
+		Set<String> stopTagSet = new HashSet<String>(stopTags.length);
+		stopTagSet.addAll(Arrays.asList(stopTags));
+		if (before) {
+			// i > 0, not >= 0: index 0 is the '.' padding character
+			for (int i = neRelStart - 1; i > 0; i--) {
+				if (tokens[i] != null) {
+					if (tokens[i].getTag().equals(tag)) {
+						return tokens[i];
+					} else if (stopTagSet.contains(tokens[i].getTag()))
+						break;
+				}
+			}
+		} else {
+			for (int i = neRelEnd; i < tokens.length; i++) {
+				if (tokens[i] != null) {
+					if (tokens[i].getTag().equals(tag)) {
+						return tokens[i];
+					} else if (stopTagSet.contains(tokens[i].getTag()))
+						break;
+				}
+			}
+		}
+		return null;
+	}
+
+	/**
+	 * check the negation status of the specified term in the specified
+	 * sentence. Marks every negex trigger in the sentence, then looks for a
+	 * pre/post (possible-)negation trigger adjacent to the term without an
+	 * intervening stop tag. Whether possibilities are checked/negated is
+	 * controlled by the checkPossibilities/negatePossibilities fields.
+	 * 
+	 * @param aJCas
+	 *            for adding annotations
+	 * @param s
+	 *            the sentence in which we will look
+	 * @param ne
+	 *            the named entity whose negation status will be checked.
+	 */
+	private void checkNegation(JCas aJCas, Sentence s, Annotation ne) {
+		if (storeAsInterval && ne instanceof IdentifiedAnnotation) {
+			// default is affirmed, which is coded as confidence = 1
+			((IdentifiedAnnotation) ne).setConfidence(1);
+		}
+		// need to add . on either side due to the way the regexs are built
+		String sentence = "." + s.getCoveredText() + ".";
+		// allocate array of tokens
+		// this maps each character of the sentence to a token
+		NegexToken[] tokens = new NegexToken[sentence.length()];
+		// char buffer for modify the sentence
+		// we want to 'black out' trigger words already found and the phrase we
+		// were looking for
+		CharBuffer buf = CharBuffer.wrap(sentence.toCharArray());
+		// calculate location of the ne relative to the sentence
+		// (+1 accounts for the '.' prepended above)
+		int neRelStart = ne.getBegin() - s.getBegin() + 1;
+		int neRelEnd = ne.getEnd() - s.getBegin() + 1;
+		// black out the ne in the sentence buffer
+		for (int i = neRelStart; i < neRelEnd; i++) {
+			// black out the named entity from the char buffer
+			buf.put(i, '_');
+		}
+		// look for negex rules in the sentence
+		for (NegexRule rule : this.listNegexRules) {
+			Matcher m = rule.getPattern().matcher(buf);
+			while (m.find() == true) {
+				// see if the range has not already been marked
+				boolean bUnoccupied = true;
+				for (int i = m.start(); i < m.end() && bUnoccupied; i++)
+					bUnoccupied = tokens[i] == null;
+				if (bUnoccupied) {
+					// mark the range in the sentence with this token
+					// black it out so other rules do not match
+					// (bUnoccupied is always true in the loop below)
+					NegexToken t = new NegexToken(m.start(), m.end(), rule);
+					for (int i = m.start(); i < m.end() && bUnoccupied; i++) {
+						// black out this range from the char buffer
+						buf.put(i, '_');
+						// add the token to the array
+						tokens[i] = t;
+					}
+				}
+			}
+		}
+		// prenegation
+		// look for a PREN rule before the ne, without any intervening stop tags
+		NegexToken t = this.findTokenByTag("[PREN]", new String[] { "[CONJ]",
+				"[PSEU]", "[POST]", "[PREP]", "[POSP]" }, true, neRelStart,
+				neRelEnd, tokens);
+		if (t != null) {
+			// hit - negate the ne
+			annotateNegation(aJCas, s, ne, t, true, false);
+		} else {
+			// look for POST rule after the ne, without any intervening stop
+			// tags
+			t = this.findTokenByTag("[POST]", new String[] { "[CONJ]",
+					"[PSEU]", "[PREN]", "[PREP]", "[POSP]" }, false,
+					neRelStart, neRelEnd, tokens);
+			if (t != null) {
+				annotateNegation(aJCas, s, ne, t, true, false);
+			} else if (this.checkPossibilities || this.negatePossibilities) {
+				// check possibles
+				t = this.findTokenByTag("[PREP]", new String[] { "[CONJ]",
+						"[PSEU]", "[PREN]", "[POST]", "[POSP]" }, true,
+						neRelStart, neRelEnd, tokens);
+				if (t != null) {
+					annotateNegation(aJCas, s, ne, t, false, true);
+				} else {
+					t = this.findTokenByTag("[POSP]", new String[] { "[CONJ]",
+							"[PSEU]", "[PREN]", "[POST]", "[PREP]" }, false,
+							neRelStart, neRelEnd, tokens);
+					if (t != null)
+						annotateNegation(aJCas, s, ne, t, true, true);
+				}
+			}
+		}
+	}
+
+	/**
+	 * Alternate, string-rewriting implementation of the original negex
+	 * algorithm. Currently UNUSED: the only call site (in negateAnnotations)
+	 * is commented out, and the result is only printed to stdout rather than
+	 * stored on the annotation. Kept for reference against the original
+	 * GenNegEx code.
+	 * 
+	 * @param aJCas
+	 *            cas (unused in this implementation)
+	 * @param s
+	 *            sentence to scan
+	 * @param ne
+	 *            term whose negation status is classified
+	 * @param negPoss
+	 *            should possible negations ([PREP]/[POSP]) also be applied
+	 */
+	private void checkNegation2(JCas aJCas, Sentence s,
+			IdentifiedAnnotation ne, boolean negPoss) {
+		// Sorter s = new Sorter();
+		String sToReturn = "";
+		String sScope = "";
+		// String sentencePortion = "";
+		// ArrayList sortedRules = null;
+
+		String filler = "_";
+		// boolean negationScope = true;
+
+		// Sort the rules by length in descending order.
+		// Rules need to be sorted so the longest rule is always tried to match
+		// first.
+		// Some of the rules overlap so without sorting first shorter rules
+		// (some of them POSSIBLE or PSEUDO)
+		// would match before longer legitimate negation rules.
+		//
+
+		// There is efficiency issue here. It is better if rules are sorted by
+		// the
+		// calling program once and used without sorting in GennegEx.
+		// sortedRules = this.rules;
+
+		// Process the sentence and tag each matched negation
+		// rule with correct negation rule tag.
+		//
+		// At the same time check for the phrase that we want to decide
+		// the negation status for and
+		// tag the phrase with [PHRASE] ... [PHRASE]
+		// In both the negation rules and in the phrase replace white space
+		// with "filler" string. (This could cause problems if the sentences
+		// we study has "filler" on their own.)
+
+		// Sentence needs one character in the beginning and end to match.
+		// We remove the extra characters after processing.
+		// vng String sentence = "." + sentenceString + ".";
+		String sentence = "." + s.getCoveredText() + ".";
+
+		// Tag the phrases we want to detect for negation.
+		// Should happen before rule detection.
+		// vng String phrase = phraseString;
+		// NOTE(review): the covered text is compiled as a regex; phrases
+		// containing regex metacharacters could throw or mis-match - consider
+		// Pattern.quote if this code is ever revived.
+		String phrase = ne.getCoveredText();
+		Pattern pph = Pattern.compile(phrase.trim(), Pattern.CASE_INSENSITIVE);
+		Matcher mph = pph.matcher(sentence);
+		CharBuffer buf = CharBuffer.wrap(sentence.toCharArray());
+
+		while (mph.find() == true) {
+			sentence = mph.replaceAll(" [PHRASE]"
+					+ mph.group().trim().replaceAll(" ", filler) + "[PHRASE]");
+		}
+
+		for (NegexRule rule : this.listNegexRules) {
+			Matcher m = rule.getPattern().matcher(sentence);
+			while (m.find() == true) {
+				sentence = m.replaceAll(" " + rule.getTag()
+						+ m.group().trim().replaceAll(" ", filler)
+						+ rule.getTag() + " ");
+			}
+		}
+
+		// Exchange the [PHRASE] ... [PHRASE] tags for [NEGATED] ... [NEGATED]
+		// based of PREN, POST rules and if flag is set to true
+		// then based on PREP and POSP, as well.
+
+		// Because PRENEGATION [PREN} is checked first it takes precedent over
+		// POSTNEGATION [POST].
+		// Similarly POSTNEGATION [POST] takes precedent over POSSIBLE
+		// PRENEGATION [PREP]
+		// and [PREP] takes precedent over POSSIBLE POSTNEGATION [POSP].
+
+		Pattern pSpace = Pattern.compile("[\\s+]");
+		String[] sentenceTokens = pSpace.split(sentence);
+		StringBuilder sb = new StringBuilder();
+
+		// Check for [PREN]
+		for (int i = 0; i < sentenceTokens.length; i++) {
+			sb.append(" " + sentenceTokens[i].trim());
+			if (sentenceTokens[i].trim().startsWith("[PREN]")) {
+
+				for (int j = i + 1; j < sentenceTokens.length; j++) {
+					if (sentenceTokens[j].trim().startsWith("[CONJ]")
+							|| sentenceTokens[j].trim().startsWith("[PSEU]")
+							|| sentenceTokens[j].trim().startsWith("[POST]")
+							|| sentenceTokens[j].trim().startsWith("[PREP]")
+							|| sentenceTokens[j].trim().startsWith("[POSP]")) {
+						break;
+					}
+
+					if (sentenceTokens[j].trim().startsWith("[PHRASE]")) {
+						sentenceTokens[j] = sentenceTokens[j].trim()
+								.replaceAll("\\[PHRASE\\]", "[NEGATED]");
+					}
+				}
+			}
+		}
+
+		sentence = sb.toString();
+		pSpace = Pattern.compile("[\\s+]");
+		sentenceTokens = pSpace.split(sentence);
+		StringBuilder sb2 = new StringBuilder();
+
+		// Check for [POST]
+		for (int i = sentenceTokens.length - 1; i > 0; i--) {
+			sb2.insert(0, sentenceTokens[i] + " ");
+			if (sentenceTokens[i].trim().startsWith("[POST]")) {
+				for (int j = i - 1; j > 0; j--) {
+					if (sentenceTokens[j].trim().startsWith("[CONJ]")
+							|| sentenceTokens[j].trim().startsWith("[PSEU]")
+							|| sentenceTokens[j].trim().startsWith("[PREN]")
+							|| sentenceTokens[j].trim().startsWith("[PREP]")
+							|| sentenceTokens[j].trim().startsWith("[POSP]")) {
+						break;
+					}
+
+					if (sentenceTokens[j].trim().startsWith("[PHRASE]")) {
+						sentenceTokens[j] = sentenceTokens[j].trim()
+								.replaceAll("\\[PHRASE\\]", "[NEGATED]");
+					}
+				}
+			}
+		}
+		sentence = sb2.toString();
+
+		// If POSSIBLE negation is detected as negation.
+		// negatePossible being set to "true" then check for [PREP] and [POSP].
+		if (negPoss == true) {
+			pSpace = Pattern.compile("[\\s+]");
+			sentenceTokens = pSpace.split(sentence);
+
+			StringBuilder sb3 = new StringBuilder();
+
+			// Check for [PREP]
+			for (int i = 0; i < sentenceTokens.length; i++) {
+				sb3.append(" " + sentenceTokens[i].trim());
+				if (sentenceTokens[i].trim().startsWith("[PREP]")) {
+
+					for (int j = i + 1; j < sentenceTokens.length; j++) {
+						if (sentenceTokens[j].trim().startsWith("[CONJ]")
+								|| sentenceTokens[j].trim()
+										.startsWith("[PSEU]")
+								|| sentenceTokens[j].trim()
+										.startsWith("[POST]")
+								|| sentenceTokens[j].trim()
+										.startsWith("[PREN]")
+								|| sentenceTokens[j].trim()
+										.startsWith("[POSP]")) {
+							break;
+						}
+
+						if (sentenceTokens[j].trim().startsWith("[PHRASE]")) {
+							sentenceTokens[j] = sentenceTokens[j].trim()
+									.replaceAll("\\[PHRASE\\]", "[POSSIBLE]");
+						}
+					}
+				}
+			}
+			sentence = sb3.toString();
+			pSpace = Pattern.compile("[\\s+]");
+			sentenceTokens = pSpace.split(sentence);
+			StringBuilder sb4 = new StringBuilder();
+
+			// Check for [POSP]
+			for (int i = sentenceTokens.length - 1; i > 0; i--) {
+				sb4.insert(0, sentenceTokens[i] + " ");
+				if (sentenceTokens[i].trim().startsWith("[POSP]")) {
+					for (int j = i - 1; j > 0; j--) {
+						if (sentenceTokens[j].trim().startsWith("[CONJ]")
+								|| sentenceTokens[j].trim()
+										.startsWith("[PSEU]")
+								|| sentenceTokens[j].trim()
+										.startsWith("[PREN]")
+								|| sentenceTokens[j].trim()
+										.startsWith("[PREP]")
+								|| sentenceTokens[j].trim()
+										.startsWith("[POST]")) {
+							break;
+						}
+
+						if (sentenceTokens[j].trim().startsWith("[PHRASE]")) {
+							sentenceTokens[j] = sentenceTokens[j].trim()
+									.replaceAll("\\[PHRASE\\]", "[POSSIBLE]");
+						}
+					}
+				}
+			}
+			sentence = sb4.toString();
+		}
+
+		// Remove the filler character we used.
+		sentence = sentence.replaceAll(filler, " ");
+
+		// Remove the extra periods at the beginning
+		// and end of the sentence.
+		sentence = sentence.substring(0, sentence.trim().lastIndexOf('.'));
+		sentence = sentence.replaceFirst(".", "");
+
+		// Get the scope of the negation for PREN and PREP
+		if (sentence.contains("[PREN]") || sentence.contains("[PREP]")) {
+			int startOffset = sentence.indexOf("[PREN]");
+			if (startOffset == -1) {
+				startOffset = sentence.indexOf("[PREP]");
+			}
+
+			int endOffset = sentence.indexOf("[CONJ]");
+			if (endOffset == -1) {
+				endOffset = sentence.indexOf("[PSEU]");
+			}
+			if (endOffset == -1) {
+				endOffset = sentence.indexOf("[POST]");
+			}
+			if (endOffset == -1) {
+				endOffset = sentence.indexOf("[POSP]");
+			}
+			if (endOffset == -1 || endOffset < startOffset) {
+				endOffset = sentence.length() - 1;
+			}
+			sScope = sentence.substring(startOffset, endOffset + 1);
+		}
+
+		// Get the scope of the negation for POST and POSP
+		if (sentence.contains("[POST]") || sentence.contains("[POSP]")) {
+			int endOffset = sentence.lastIndexOf("[POST]");
+			if (endOffset == -1) {
+				endOffset = sentence.lastIndexOf("[POSP]");
+			}
+
+			int startOffset = sentence.lastIndexOf("[CONJ]");
+			if (startOffset == -1) {
+				startOffset = sentence.lastIndexOf("[PSEU]");
+			}
+			if (startOffset == -1) {
+				startOffset = sentence.lastIndexOf("[PREN]");
+			}
+			if (startOffset == -1) {
+				startOffset = sentence.lastIndexOf("[PREP]");
+			}
+			if (startOffset == -1) {
+				startOffset = 0;
+			}
+			sScope = sentence.substring(startOffset, endOffset);
+		}
+
+		// Classify to: negated/possible/affirmed
+		if (sentence.contains("[NEGATED]")) {
+			sentence = sentence + "\t" + "negated" + "\t" + sScope;
+		} else if (sentence.contains("[POSSIBLE]")) {
+			sentence = sentence + "\t" + "possible" + "\t" + sScope;
+		} else {
+			sentence = sentence + "\t" + "affirmed" + "\t" + sScope;
+		}
+
+		sToReturn = sentence;
+		// NOTE(review): result is only printed, never applied to the cas
+		System.out.println(sToReturn);
+	}
+
+	/**
+	 * set the polarity/confidence on a named entity, and add a negation
+	 * context annotation covering the trigger span.
+	 * 
+	 * @param aJCas
+	 *            for adding the ContextAnnotation
+	 * @param s
+	 *            used to figure out the trigger's text span in the document
+	 * @param anno
+	 *            annotation to mark; for IdentifiedAnnotations the
+	 *            polarity/confidence features are set, otherwise the
+	 *            "negated"/"possible" bean properties are set reflectively
+	 * @param t
+	 *            the trigger token that fired
+	 * @param negated
+	 *            was the term negated
+	 * @param possible
+	 *            was the term only possibly negated
+	 */
+	private void annotateNegation(JCas aJCas, Sentence s, Annotation anno,
+			NegexToken t, boolean negated, boolean possible) {
+		if (anno instanceof IdentifiedAnnotation) {
+			IdentifiedAnnotation ne = (IdentifiedAnnotation) anno;
+			if (!storeAsInterval) {
+				if (possible)
+					ne.setConfidence(-1);
+				if (negated || (this.negatePossibilities && possible))
+					ne.setPolarity(-1);
+			} else {
+				// interval coding: see class javadoc
+				ne.setPolarity(negated || possible ? -1 : 0);
+				float confidence = negated ? -1 : 1;
+				if (possible)
+					confidence *= 0.5;
+				ne.setConfidence(confidence);
+			}
+		} else {
+			try {
+				BeanUtils.setProperty(anno, "negated", negated);
+				BeanUtils.setProperty(anno, "possible", possible);
+			} catch (IllegalAccessException iae) {
+				log.error("error negating annotation", iae);
+			} catch (InvocationTargetException e) {
+				log.error("error negating annotation", e);
+			}
+		}
+		// record the trigger span; -1 undoes the '.' padding offset
+		ContextAnnotation nec = new ContextAnnotation(aJCas);
+		nec.setBegin(s.getBegin() + t.getStart() - 1);
+		nec.setEnd(s.getBegin() + t.getEnd() - 1);
+		nec.setScope(t.getTag());
+		nec.setFocusText(anno.getCoveredText());
+		nec.addToIndexes();
+	}
+
+}

Added: ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SegmentRegexAnnotator.java
URL: http://svn.apache.org/viewvc/ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SegmentRegexAnnotator.java?rev=1555281&view=auto
==============================================================================
--- ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SegmentRegexAnnotator.java (added)
+++ ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SegmentRegexAnnotator.java Fri Jan  3 23:22:58 2014
@@ -0,0 +1,165 @@
+package org.apache.ctakes.ytex.uima.annotators;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ctakes.typesystem.type.textspan.Segment;
+import org.apache.ctakes.ytex.uima.ApplicationContextHolder;
+import org.apache.ctakes.ytex.uima.dao.SegmentRegexDao;
+import org.apache.ctakes.ytex.uima.model.SegmentRegex;
+import org.apache.uima.UimaContext;
+import org.apache.uima.analysis_component.JCasAnnotator_ImplBase;
+import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
+import org.apache.uima.jcas.JCas;
+import org.apache.uima.resource.ResourceInitializationException;
+
+import com.google.common.base.Strings;
+
+/**
+ * Annotate segments (i.e. sections). Use regexs to find segments. Read the
+ * regex-segment id map from the db.
+ * 
+ * @author vhacongarlav
+ * 
+ */
+public class SegmentRegexAnnotator extends JCasAnnotator_ImplBase {
+	private static final Log log = LogFactory
+			.getLog(SegmentRegexAnnotator.class);
+	private SegmentRegexDao segmentRegexDao;
+	private Map<SegmentRegex, Pattern> regexMap = new HashMap<SegmentRegex, Pattern>();
+	private String defaultSegmentId = "DEFAULT";
+
+	/**
+	 * Load the regex-segment map from the database using the segmentRegexDao.
+	 * Compile all the patterns. Optionally override the default segment id
+	 * via the "SegmentID" configuration parameter.
+	 */
+	@Override
+	public void initialize(UimaContext aContext)
+			throws ResourceInitializationException {
+		// let the uima base class capture the context (consistent with the
+		// other ytex annotators, e.g. NegexAnnotator)
+		super.initialize(aContext);
+		segmentRegexDao = (SegmentRegexDao) ApplicationContextHolder
+				.getApplicationContext().getBean("segmentRegexDao");
+		List<SegmentRegex> regexList = segmentRegexDao.getSegmentRegexs();
+		initRegexMap(regexList);
+		// renamed local so it no longer shadows the defaultSegmentId field
+		String segmentIdParam = (String) aContext
+				.getConfigParameterValue("SegmentID");
+		if (!Strings.isNullOrEmpty(segmentIdParam)) {
+			this.defaultSegmentId = segmentIdParam;
+		}
+	}
+
+	/**
+	 * Compile the pattern of each SegmentRegex and store it in regexMap.
+	 * 
+	 * @param regexList
+	 *            regex-segment configuration loaded from the db
+	 */
+	protected void initRegexMap(List<SegmentRegex> regexList) {
+		for (SegmentRegex regex : regexList) {
+			if (log.isDebugEnabled())
+				log.debug(regex);
+			Pattern pat = Pattern.compile(regex.getRegex());
+			regexMap.put(regex, pat);
+		}
+	}
+
+	/**
+	 * Add Segment annotations to the cas. First create a list of segments. Then
+	 * sort the list according to segment start. For each segment that has no
+	 * end, set the end to the [beginning of next segment - 1], or the eof.
+	 * Finally, cover any text outside the matched segments with 'default'
+	 * segments so downstream annotators still process it.
+	 */
+	@Override
+	public void process(JCas aJCas) throws AnalysisEngineProcessException {
+		String strDocText = aJCas.getDocumentText();
+		if (strDocText == null)
+			return;
+		List<Segment> segmentsAdded = new ArrayList<Segment>();
+		// find all the segments, set begin and id, add to list
+		for (Map.Entry<SegmentRegex, Pattern> entry : regexMap.entrySet()) {
+			if (log.isDebugEnabled()) {
+				log.debug("applying regex:" + entry.getKey().getRegex());
+			}
+			Matcher matcher = entry.getValue().matcher(strDocText);
+			while (matcher.find()) {
+				Segment seg = new Segment(aJCas);
+				// limitToRegex with a single capture group: segment covers
+				// only the group; otherwise the segment starts at the match
+				// and (unless limitToRegex) the end is filled in below
+				if (entry.getKey().isLimitToRegex()
+						&& matcher.groupCount() == 1) {
+					seg.setBegin(matcher.start(1));
+					seg.setEnd(matcher.end(1));
+				} else {
+					seg.setBegin(matcher.start());
+					if (entry.getKey().isLimitToRegex()) {
+						seg.setEnd(matcher.end());
+					}
+				}
+				seg.setId(entry.getKey().getSegmentID());
+				if (log.isDebugEnabled()) {
+					log.debug("found match: id=" + seg.getId() + ", begin="
+							+ seg.getBegin());
+				}
+				segmentsAdded.add(seg);
+			}
+		}
+		if (log.isDebugEnabled()) {
+			log.debug("segmentsAdded: " + segmentsAdded.size());
+		}
+		if (segmentsAdded.size() > 0) {
+			// sort the segments by begin
+			Collections.sort(segmentsAdded, new Comparator<Segment>() {
+
+				// @Override
+				public int compare(Segment o1, Segment o2) {
+					return o1.getBegin() < o2.getBegin() ? -1
+							: o1.getBegin() > o2.getBegin() ? 1 : 0;
+				}
+
+			});
+			// set the end for each segment
+			for (int i = 0; i < segmentsAdded.size(); i++) {
+				Segment seg = segmentsAdded.get(i);
+				Segment segNext = (i + 1) < segmentsAdded.size() ? segmentsAdded
+						.get(i + 1) : null;
+				if (seg.getEnd() <= 0) {
+					if (segNext != null) {
+						// set end to beginning of next segment
+						seg.setEnd(segNext.getBegin() - 1);
+					} else {
+						// set end to doc end
+						seg.setEnd(strDocText.length());
+					}
+				} else {
+					// segments shouldn't overlap
+					if (segNext != null && segNext.getBegin() < seg.getEnd()) {
+						seg.setEnd(segNext.getBegin() - 1);
+					}
+				}
+				if (log.isDebugEnabled()) {
+					log.debug("Adding Segment: segment id=" + seg.getId()
+							+ ", begin=" + seg.getBegin() + ", end="
+							+ seg.getEnd());
+				}
+				seg.addToIndexes();
+			}
+		}
+		// ctakes 1.3.2 - anything not in a segment will not be annotated - add
+		// text outside segments to the 'default' segment
+		int end = 0;
+		for (Segment seg : segmentsAdded) {
+			if ((seg.getBegin() - 1) > end) {
+				addGapSegment(aJCas, end, seg.getBegin() - 1);
+			}
+			end = seg.getEnd();
+		}
+		if (end < strDocText.length()) {
+			addGapSegment(aJCas, end, strDocText.length());
+		}
+	}
+
+	/**
+	 * Create a Segment with the default segment id covering [begin, end),
+	 * used to cover document text that falls outside all matched sections.
+	 * 
+	 * @param aJCas
+	 *            cas to add the segment to
+	 * @param begin
+	 *            segment start offset
+	 * @param end
+	 *            segment end offset
+	 */
+	private void addGapSegment(JCas aJCas, int begin, int end) {
+		Segment segGap = new Segment(aJCas);
+		segGap.setBegin(begin);
+		segGap.setEnd(end);
+		// set all features before indexing: modifying a feature structure
+		// after addToIndexes is fragile in uima, and every other annotation
+		// in this file is fully populated before being indexed
+		segGap.setId(defaultSegmentId);
+		segGap.addToIndexes();
+	}
+}

Added: ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SenseDisambiguatorAnnotator.java
URL: http://svn.apache.org/viewvc/ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SenseDisambiguatorAnnotator.java?rev=1555281&view=auto
==============================================================================
--- ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SenseDisambiguatorAnnotator.java (added)
+++ ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SenseDisambiguatorAnnotator.java Fri Jan  3 23:22:58 2014
@@ -0,0 +1,210 @@
+package org.apache.ctakes.ytex.uima.annotators;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.commons.beanutils.BeanUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ctakes.typesystem.type.refsem.OntologyConcept;
+import org.apache.ctakes.typesystem.type.refsem.UmlsConcept;
+import org.apache.ctakes.typesystem.type.textsem.IdentifiedAnnotation;
+import org.apache.ctakes.ytex.kernel.metric.ConceptSimilarityService;
+import org.apache.ctakes.ytex.kernel.metric.ConceptSimilarityService.SimilarityMetricEnum;
+import org.apache.ctakes.ytex.kernel.wsd.WordSenseDisambiguator;
+import org.apache.ctakes.ytex.uima.ApplicationContextHolder;
+import org.apache.uima.UimaContext;
+import org.apache.uima.analysis_component.JCasAnnotator_ImplBase;
+import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
+import org.apache.uima.cas.FSIterator;
+import org.apache.uima.cas.FeatureStructure;
+import org.apache.uima.jcas.JCas;
+import org.apache.uima.jcas.cas.FSArray;
+import org.apache.uima.jcas.tcas.Annotation;
+import org.apache.uima.resource.ResourceInitializationException;
+
+import com.google.common.base.Strings;
+
+/**
+ * Disambiguate named entities via adapted Lesk algorithm with semantic
+ * similarity. Configuration parameters set in ytex.properties / via -D option /
+ * in config for SenseDisambiguatorAnnotator (minus the 'ytex.' prefix)
+ * <ul>
+ * <li>ytex.sense.windowSize - context window size. concepts from named entities
+ * +- windowSize around the target named entity are used for disambiguation.
+ * defaults to 50
+ * <li>ytex.sense.metric - measure to use. defaults to INTRINSIC_PATH
+ * <li>ytex.conceptGraph - concept graph to use.
+ * <li>ytex.conceptProperty - field of ontology concept to use. Use cui for
+ * UmlsConcept, code for OntologyConcept.
+ * </ul>
+ * 
+ * @author vijay
+ * 
+ */
+public class SenseDisambiguatorAnnotator extends JCasAnnotator_ImplBase {
+	int windowSize = 50;
+	SimilarityMetricEnum metric = SimilarityMetricEnum.INTRINSIC_PATH;
+	WordSenseDisambiguator wsd;
+	boolean disabled = false;
+	String conceptProperty = null;
+	private static final Log log = LogFactory
+			.getLog(SenseDisambiguatorAnnotator.class);
+
+	@Override
+	public void initialize(UimaContext aContext)
+			throws ResourceInitializationException {
+		super.initialize(aContext);
+		Properties props = ApplicationContextHolder.getYtexProperties();
+		String conceptProperty = (String) aContext
+				.getConfigParameterValue("conceptProperty");
+		if (!Strings.isNullOrEmpty(conceptProperty))
+			this.conceptProperty = conceptProperty;
+		else
+			this.conceptProperty = props.getProperty("ytex.conceptProperty");
+		Integer nWindowSize = (Integer) aContext
+				.getConfigParameterValue("windowSize");
+		if (nWindowSize != null && nWindowSize.intValue() > 0)
+			windowSize = nWindowSize.intValue();
+		else
+			windowSize = Integer.parseInt(props.getProperty(
+					"ytex.sense.windowSize", "50"));
+		String uMetric = (String) aContext.getConfigParameterValue("metric");
+		if (!Strings.isNullOrEmpty(uMetric))
+			metric = SimilarityMetricEnum.valueOf(uMetric);
+		else
+			metric = SimilarityMetricEnum.valueOf(props.getProperty(
+					"ytex.sense.metric", "INTRINSIC_PATH"));
+		wsd = ApplicationContextHolder.getApplicationContext().getBean(
+				WordSenseDisambiguator.class);
+		ConceptSimilarityService simSvc = ApplicationContextHolder
+				.getApplicationContext().getBean(
+						ConceptSimilarityService.class);
+		if (simSvc.getConceptGraph() == null) {
+			log.warn("Concept Graph was not loaded - word sense disambiguation disabled");
+			disabled = true;
+		}
+	}
+
+	@Override
+	public void process(JCas jcas) throws AnalysisEngineProcessException {
+		if (disabled)
+			return;
+		// iterate through sentences
+		FSIterator<Annotation> neIter = jcas.getAnnotationIndex(
+				IdentifiedAnnotation.type).iterator();
+		List<IdentifiedAnnotation> listNE = new ArrayList<IdentifiedAnnotation>();
+		while (neIter.hasNext()) {
+			listNE.add((IdentifiedAnnotation) neIter.next());
+		}
+		// disambiguate the named entities
+		disambiguate(jcas, listNE);
+	}
+
	/**
	 * Get the concept id for the specified concept feature structure. If
	 * conceptProperty is configured, read that bean property reflectively;
	 * otherwise return the cui for a UmlsConcept or the code for an
	 * OntologyConcept.
	 * 
	 * @param oc
	 *            concept feature structure (UmlsConcept, OntologyConcept, or
	 *            any type exposing the configured conceptProperty)
	 * @return concept id; may be null or empty if the property is unset
	 * @throws AnalysisEngineProcessException
	 *             if reflective property access fails
	 * @throws IllegalArgumentException
	 *             if no conceptProperty is configured and oc is neither a
	 *             UmlsConcept nor an OntologyConcept
	 */
	protected String getConceptId(FeatureStructure oc)
			throws AnalysisEngineProcessException {
		try {

			if (!Strings.isNullOrEmpty(this.conceptProperty))
				return BeanUtils.getProperty(oc, conceptProperty);
			if (oc instanceof UmlsConcept) {
				return ((UmlsConcept) oc).getCui();
			} else if (oc instanceof OntologyConcept) {
				return ((OntologyConcept) oc).getCode();
			} else {
				throw new IllegalArgumentException(
						"don't know how to get concept id for: "
								+ oc.getClass().getName());
			}
		} catch (IllegalAccessException e) {
			throw new AnalysisEngineProcessException(e);
		} catch (InvocationTargetException e) {
			throw new AnalysisEngineProcessException(e);
		} catch (NoSuchMethodException e) {
			throw new AnalysisEngineProcessException(e);
		}

	}
+
+	/**
+	 * 
+	 * @param jcas
+	 * @param listNE
+	 *            list of named entities to disambiguate
+	 */
+	protected void disambiguate(JCas jcas, List<IdentifiedAnnotation> listNE)
+			throws AnalysisEngineProcessException {
+		// allocate list to hold IdentifiedAnnotations with concepts
+		List<IdentifiedAnnotation> listNonTrivialNE = new ArrayList<IdentifiedAnnotation>();
+		// allocate list to hold concepts in each named entity
+		List<Set<String>> listConcept = new ArrayList<Set<String>>();
+		for (IdentifiedAnnotation ne : listNE) {
+			FSArray concepts = ne.getOntologyConceptArr();
+			// add the concept senses from each named entity
+			if (concepts != null && concepts.size() > 0) {
+				listNonTrivialNE.add(ne);
+				Set<String> conceptSenses = new HashSet<String>();
+				listConcept.add(conceptSenses);
+				for (int i = 0; i < concepts.size(); i++) {
+
+					if (concepts.get(i) != null) {
+						String conceptId = this.getConceptId(concepts.get(i));
+						if (!Strings.isNullOrEmpty(conceptId))
+							conceptSenses.add(conceptId);
+					}
+				}
+			}
+		}
+		if(log.isTraceEnabled())
+			log.trace("listConcept: " + listConcept);
+		// iterate through named entities and disambiguate
+		for (int i = 0; i < listConcept.size(); i++) {
+			Set<String> conceptSenses = listConcept.get(i);
+			// only bother with wsd if there is more than one sense
+			if (conceptSenses.size() > 1) {
+				if(log.isTraceEnabled())
+					log.trace("i: " + i + ", conceptSenses: " + conceptSenses);
+				Map<String, Double> scores = new HashMap<String, Double>();
+				String concept = this.wsd.disambiguate(listConcept, i, null,
+						windowSize, metric, scores, true);
+				if(log.isTraceEnabled())
+					log.trace("i: " + i + ", concept: " + concept);
+				IdentifiedAnnotation ne = listNonTrivialNE.get(i);
+				FSArray concepts = ne.getOntologyConceptArr();
+				for (int j = 0; j < concepts.size(); j++) {
+					OntologyConcept yoc = (OntologyConcept) concepts.get(j);
+					String conceptId = this.getConceptId(yoc);
+					// update the score and set the predicted concept field
+					if (concept == null || concept.equals(conceptId))
+						yoc.setDisambiguated(true);
+					if (scores.containsKey(conceptId))
+						yoc.setScore(scores.get(conceptId));
+
+				}
+			} else if (conceptSenses.size() == 1) {
+				// only one concept - for ytex concept set the predicted concept
+				IdentifiedAnnotation ne = listNonTrivialNE.get(i);
+				FSArray concepts = ne.getOntologyConceptArr();
+				OntologyConcept oc = (OntologyConcept) concepts.get(0);
+				oc.setDisambiguated(true);
+			}
+		}
+	}
+}

Added: ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SentenceDetector.java
URL: http://svn.apache.org/viewvc/ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SentenceDetector.java?rev=1555281&view=auto
==============================================================================
--- ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SentenceDetector.java (added)
+++ ctakes/branches/ytex/ctakes-ytex-uima/src/main/java/org/apache/ctakes/ytex/uima/annotators/SentenceDetector.java Fri Jan  3 23:22:58 2014
@@ -0,0 +1,484 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ctakes.ytex.uima.annotators;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
+
+import opennlp.tools.sentdetect.DefaultSDContextGenerator;
+import opennlp.tools.sentdetect.SentenceModel;
+import opennlp.tools.util.InvalidFormatException;
+
+import org.apache.ctakes.core.resource.FileLocator;
+import org.apache.ctakes.core.sentence.EndOfSentenceScannerImpl;
+import org.apache.ctakes.core.sentence.SentenceDetectorCtakes;
+import org.apache.ctakes.core.util.ParamUtil;
+import org.apache.ctakes.typesystem.type.textspan.Segment;
+import org.apache.ctakes.typesystem.type.textspan.Sentence;
+import org.apache.log4j.Logger;
+import org.apache.uima.UimaContext;
+import org.apache.uima.analysis_component.JCasAnnotator_ImplBase;
+import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
+import org.apache.uima.analysis_engine.annotator.AnnotatorProcessException;
+import org.apache.uima.jcas.JCas;
+import org.apache.uima.jcas.JFSIndexRepository;
+import org.apache.uima.resource.ResourceAccessException;
+import org.apache.uima.resource.ResourceInitializationException;
+
+import com.google.common.base.Strings;
+
+/**
+ * Wraps the OpenNLP sentence detector in a UIMA annotator.
+ * 
+ * Changes:
+ * <ul>
+ * <li>split on paragraphs before feeding into maximum entropy model
+ * <li>don't split on newlines
+ * <li>split on periods
+ * <li>split on semi-structured text such as checkboxes
+ * </ul>
+ * 
+ * Parameters (optional):
+ * <ul>
+ * <li>paragraphPattern: regex to split paragraphs. default PARAGRAPH_PATTERN
+ * <li>acronymPattern: default ACRONYM_PATTERN. If the text preceding period
+ * matches this pattern, we do not split at the period
+ * <li>periodPattern: default PERIOD_PATTERN. If the text following period
+ * matches this pattern, we split it.
+ * <li>splitPattern: regex to split at semi-structured fields. default
+ * SPLIT_PATTERN
+ * </ul>
+ * 
+ * 
+ * 
+ * @author Mayo Clinic
+ * @author vijay
+ */
+public class SentenceDetector extends JCasAnnotator_ImplBase {
+	/**
+	 * Value is "SegmentsToSkip". This parameter specifies which sections to
+	 * skip. The parameter should be of type String, should be multi-valued and
+	 * optional.
+	 */
+	public static final String PARAM_SEGMENTS_TO_SKIP = "SegmentsToSkip";
+
+	// LOG4J logger based on class name
+	private Logger logger = Logger.getLogger(getClass().getName());
+
+	public static final String SD_MODEL_FILE_PARAM = "SentenceModelFile";
+
+	private opennlp.tools.sentdetect.SentenceModel sdmodel;
+	/**
+	 * vng change split paragraphs on this pattern
+	 */
+	public static final String PARAGRAPH_PATTERN = "(?m):\\r{0,1}\\n|\\r{0,1}\\n\\r{0,1}\\n";
+	/**
+	 * vng change split sentences periods that do not have this acronym
+	 * preceding it
+	 */
+	public static final String ACRONYM_PATTERN = "(?m)Dr\\z|Ms\\z|Mr\\z|Mrs\\z|Ms\\z|\\p{Upper}\\z";
+	/**
+	 * vng change split sentences periods after which this pattern is seen
+	 */
+	public static final String PERIOD_PATTERN = "(?m)\\A\\s+\\p{Upper}|\\A\\s+\\d\\.";
+	/**
+	 * vng change split sentences on these patterns
+	 */
+	public static final String SPLIT_PATTERN = "(?im)\\n[\\(\\[]\\s*[yesxno]{0,3}\\s*[\\)\\]]|[\\(\\[]\\s*[yesxno]{0,3}\\s*[\\)\\]]\\s*\\r{0,1}\\n|^[^:\\r\\n]{3,20}\\:[^\\r\\n]{3,20}$";
+	/**
+	 * vng change
+	 */
+	private Pattern paragraphPattern;
+	/**
+	 * vng change
+	 */
+	private Pattern splitPattern;
+	/**
+	 * vng change
+	 */
+	private Pattern periodPattern;
+	/**
+	 * vng change
+	 */
+	private Pattern acronymPattern;
+
+	private UimaContext context;
+
+	private Set<?> skipSegmentsSet;
+
+	private SentenceDetectorCtakes sentenceDetector;
+
+	private String NEWLINE = "\n";
+
+	private int sentenceCount = 0;
+
+	public void initialize(UimaContext aContext)
+			throws ResourceInitializationException {
+
+		super.initialize(aContext);
+		logger.info(Arrays.asList(aContext.getConfigParameterNames()));
+
+		context = aContext;
+		try {
+			configInit();
+		} catch (Exception ace) {
+			throw new ResourceInitializationException(ace);
+		}
+	}
+
+	/**
+	 * Reads configuration parameters.
+	 * 
+	 * @throws ResourceAccessException
+	 * @throws IOException 
+	 * @throws InvalidFormatException 
+	 */
+	private void configInit() throws ResourceAccessException, InvalidFormatException, IOException {
+
+		String sdModelPath = (String) context
+				.getConfigParameterValue(SD_MODEL_FILE_PARAM);
+			InputStream is = FileLocator.getAsStream(sdModelPath);
+			logger.info("Sentence detector model file: " + sdModelPath);
+			sdmodel = new SentenceModel(is);
+			is.close();
+			EndOfSentenceScannerImpl eoss = new EndOfSentenceScannerImpl();
+			char[] eosc = eoss.getEndOfSentenceCharacters();
+			// SentenceDContextGenerator cg = new SentenceDContextGenerator();
+			DefaultSDContextGenerator cg = new DefaultSDContextGenerator(eosc);
+			sentenceDetector = new SentenceDetectorCtakes(
+					sdmodel.getMaxentModel(), cg, eoss);
+
+			skipSegmentsSet = ParamUtil.getStringParameterValuesSet(
+					PARAM_SEGMENTS_TO_SKIP, context);
+			// vng change begin
+			paragraphPattern = compilePatternCheck("paragraphPattern",
+					PARAGRAPH_PATTERN);
+			splitPattern = compilePatternCheck("splitPattern", SPLIT_PATTERN);
+			periodPattern = compilePatternCheck("periodPattern", PERIOD_PATTERN);
+			acronymPattern = compilePatternCheck("acronymPattern", ACRONYM_PATTERN);
+			// vng change end
+	}
+	/**
+	 * vng change
+	 */
+	private Pattern compilePatternCheck(String patternKey, String patternDefault) {
+		String strPattern = (String) context
+				.getConfigParameterValue(patternKey);
+		if (strPattern == null)
+			strPattern = patternDefault;
+		Pattern pat = null;
+		try {
+			pat = Strings.isNullOrEmpty(strPattern) ? null : Pattern
+					.compile(strPattern);
+		} catch (PatternSyntaxException pse) {
+			logger.warn("ignoring bad pattern, reverting to default: "
+					+ strPattern, pse);
+			pat = Pattern.compile(patternDefault);
+		}
+		return pat;
+	}
+
+	/**
+	 * Entry point for processing.
+	 */
+	public void process(JCas jcas) throws AnalysisEngineProcessException {
+
+		logger.info("Starting processing.");
+
+		sentenceCount = 0;
+
+		String text = jcas.getDocumentText();
+
+		JFSIndexRepository indexes = jcas.getJFSIndexRepository();
+		Iterator<?> sectionItr = indexes.getAnnotationIndex(Segment.type)
+				.iterator();
+		while (sectionItr.hasNext()) {
+			Segment sa = (Segment) sectionItr.next();
+			String sectionID = sa.getId();
+			if (!skipSegmentsSet.contains(sectionID)) {
+				sentenceCount = annotateParagraph(jcas, text, sa.getBegin(),
+						sa.getEnd(), sentenceCount);
+			}
+		}
+	}
+
	/**
	 * split paragraphs. Arc v1.0 had a paragraph splitter, and sentences never
	 * crossed paragraph boundaries. paragraph splitter was lost in upgrade to
	 * ctakes 1.3.2. Now split paragraphs before running through maximum entropy
	 * model - this resolves situations where the model would split after a
	 * period, e.g.:
	 * 
	 * <pre>
	 * Clinical History:
	 * Mr. So and so
	 * </pre>
	 * 
	 * Without the paragraph splitter, the model splits after Mr. With the
	 * paragraph splitter, the model doesn't split after Mr.
	 * 
	 * @param jcas
	 *            CAS to add sentence annotations to
	 * @param text
	 *            the full document text
	 * @param b
	 *            begin offset of the range to annotate (inclusive)
	 * @param e
	 *            end offset of the range to annotate (exclusive)
	 * @param sentenceCount
	 *            number of sentences already added to the CAS
	 * @return updated sentence count after annotating this range
	 * @throws AnalysisEngineProcessException
	 */
	protected int annotateParagraph(JCas jcas, String text, int b, int e,
			int sentenceCount) throws AnalysisEngineProcessException {
		if (this.paragraphPattern == null) {
			// paragraph splitting disabled - annotate the whole range at once
			return this.annotateRange(jcas, text, b, e, sentenceCount);
		} else {
			int lastEnd = b;
			// NOTE(review): the matcher scans the entire document text, not
			// just [b, e); only matches whose end falls strictly inside
			// (b, e) produce a split here
			Matcher m = paragraphPattern.matcher(text);
			while (m.find()) {
				if (m.end() > b && m.end() < e) {
					// annotate the paragraph that just ended, then advance
					sentenceCount = annotateRange(jcas, text, lastEnd, m.end(),
							sentenceCount);
					lastEnd = m.end();
				} else if (m.end() >= e) {
					break;
				}
			}
			// trailing text after the last paragraph boundary (or the whole
			// range when no boundary fell inside it)
			sentenceCount = annotateRange(jcas, text, lastEnd, e, sentenceCount);
			return sentenceCount;
		}
	}
+
+	/**
+	 * Detect sentences within a section of the text and add annotations to the
+	 * CAS. Uses OpenNLP sentence detector, and then additionally forces
+	 * sentences to end at end-of-line characters (splitting into multiple
+	 * sentences). Also trims sentences. And if the sentence detector does
+	 * happen to form a sentence that is just white space, it will be ignored.
+	 * 
+	 * @param jcas
+	 *            view of the CAS containing the text to run sentence detector
+	 *            against
+	 * @param text
+	 *            the document text
+	 * @param section
+	 *            the section this sentence is in
+	 * @param sentenceCount
+	 *            the number of sentences added already to the CAS (if
+	 *            processing one section at a time)
+	 * @return count The sum of <code>sentenceCount</code> and the number of
+	 *         Sentence annotations added to the CAS for this section
+	 * @throws AnnotatorProcessException
+	 */
+	protected int annotateRange(JCas jcas, String text, int b, int e,
+			int sentenceCount) throws AnalysisEngineProcessException {
+
+		// vng change begin
+		// int b = section.getBegin();
+		// int e = section.getEnd();
+		// vng chang end
+
+		// Use OpenNLP tools to split text into sentences
+		// The sentence detector returns the offsets of the sentence-endings it
+		// detects
+		// within the string
+		int[] sentenceBreaks = sentenceDetector.sentPosDetect(text.substring(b,
+				e)); // OpenNLP tools 1.5 returns Spans rather than offsets that
+						// 1.4 did
+		int numSentences = sentenceBreaks.length;
+		// There might be text after the last sentence-ending found by detector,
+		// so +1
+		SentenceSpan[] potentialSentSpans = new SentenceSpan[numSentences + 1];
+
+		int sentStart = b;
+		int sentEnd = b;
+		// Start by filling in sentence spans from what OpenNLP tools detected
+		// Will trim leading or trailing whitespace when check for end-of-line
+		// characters
+		for (int i = 0; i < numSentences; i++) {
+			sentEnd = sentenceBreaks[i] + b; // OpenNLP tools 1.5 returns Spans
+												// rather than offsets that 1.4
+												// did
+			String coveredText = text.substring(sentStart, sentEnd);
+			potentialSentSpans[i] = new SentenceSpan(sentStart, sentEnd,
+					coveredText);
+			sentStart = sentEnd;
+		}
+
+		// If detector didn't find any sentence-endings,
+		// or there was text after the last sentence-ending found,
+		// create a sentence from what's left, as long as it's not all
+		// whitespace.
+		// Will trim leading or trailing whitespace when check for end-of-line
+		// characters
+		if (sentEnd < e) {
+			String coveredText = text.substring(sentEnd, e);
+			if (coveredText.trim() != "") {
+				potentialSentSpans[numSentences] = new SentenceSpan(sentEnd, e,
+						coveredText);
+				numSentences++;
+			}
+		}
+
+		// Copy potentialSentSpans into sentenceSpans,
+		// ignoring any that are entirely whitespace,
+		// trimming the rest,
+		// and splitting any of those that contain an end-of-line character.
+		// Then trim any leading or trailing whitespace of ones that were split.
+		ArrayList<SentenceSpan> sentenceSpans1 = new ArrayList<SentenceSpan>(0);
+		for (int i = 0; i < potentialSentSpans.length; i++) {
+			if (potentialSentSpans[i] != null) {
+				sentenceSpans1.addAll(potentialSentSpans[i]
+						.splitAtLineBreaksAndTrim(NEWLINE)); // TODO Determine
+																// line break
+																// type
+			}
+		}
+		// vng change begin
+		// split at ".  "
+		ArrayList<SentenceSpan> sentenceSpans = new ArrayList<SentenceSpan>(
+				sentenceSpans1.size());
+		for (SentenceSpan span : sentenceSpans1) {
+			if (span != null) {
+				sentenceSpans.addAll(span.splitAtPeriodAndTrim(acronymPattern,
+						periodPattern, splitPattern));
+			}
+		}
+		// vng change end
+
+		// Add sentence annotations to the CAS
+		int previousEnd = -1;
+		for (int i = 0; i < sentenceSpans.size(); i++) {
+			SentenceSpan span = sentenceSpans.get(i);
+			if (span.getStart() != span.getEnd()) { // skip empty lines
+				Sentence sa = new Sentence(jcas);
+				sa.setBegin(span.getStart());
+				sa.setEnd(span.getEnd());
+				if (previousEnd <= sa.getBegin()) {
+					// System.out.println("Adding Sentence Annotation for " +
+					// span.toString());
+					sa.setSentenceNumber(sentenceCount);
+					sa.addToIndexes();
+					sentenceCount++;
+					previousEnd = span.getEnd();
+				} else {
+					logger.error("Skipping sentence from " + span.getStart()
+							+ " to " + span.getEnd());
+					logger.error("Overlap with previous sentence that ended at "
+							+ previousEnd);
+				}
+			}
+		}
+		return sentenceCount;
+	}
+
+	/**
+	 * Train a new sentence detector from the training data in the first file
+	 * and write the model to the second file.<br>
+	 * The training data file is expected to have one sentence per line.
+	 * 
+	 * @param args
+	 *            training_data_filename name_of_model_to_create iters? cutoff?
+	 * @throws IOException
+	 */
+	public static void main(String[] args) throws IOException {
+		final Logger logger = Logger.getLogger(SentenceDetector.class.getName()
+				+ ".main()");
+
+		// Handle arguments
+		if (args.length < 2 || args.length > 4) {
+			usage(logger);
+			System.exit(-1);
+		}
+
+		File inFile = getReadableFile(args[0]);
+
+		File outFile = getFileInExistingDir(args[1]);
+		// File outFile = new File(args[1]);
+
+		int iters = 100;
+		if (args.length > 2) {
+			iters = parseInt(args[2], logger);
+		}
+
+		int cut = 5;
+		if (args.length > 3) {
+			cut = parseInt(args[3], logger);
+		}
+
+		// Now, do the actual training
+		EndOfSentenceScannerImpl scanner = new EndOfSentenceScannerImpl();
+		int numEosc = scanner.getEndOfSentenceCharacters().length;
+
+		logger.info("Training new model from " + inFile.getAbsolutePath());
+		logger.info("Using " + numEosc + " end of sentence characters.");
+
+		logger.error("----------------------------------------------------------------------------------");
+		logger.error("Need to update yet for OpenNLP changes "); // TODO
+		logger.error("Commented out code that no longer compiles due to OpenNLP API incompatible changes"); // TODO
+		logger.error("----------------------------------------------------------------------------------");
+		// GISModel mod = SentenceDetectorME.train(inFile, iters, cut, scanner);
+		// SuffixSensitiveGISModelWriter ssgmw = new
+		// SuffixSensitiveGISModelWriter(
+		// mod, outFile);
+		// logger.info("Saving the model as: " + outFile.getAbsolutePath());
+		// ssgmw.persist();
+
+	}
+
+	public static void usage(Logger log) {
+		log.info("Usage: java "
+				+ SentenceDetector.class.getName()
+				+ " training_data_filename name_of_model_to_create <iters> <cut>");
+	}
+
+	public static int parseInt(String s, Logger log) {
+		try {
+			return Integer.parseInt(s);
+		} catch (NumberFormatException nfe) {
+			log.error("Unable to parse '" + s + "' as an integer.");
+			throw (nfe);
+		}
+	}
+
+	public static File getReadableFile(String fn) throws IOException {
+		File f = new File(fn);
+		if (!f.canRead()) {
+			throw new IOException("Unable to read from file "
+					+ f.getAbsolutePath());
+		}
+		return f;
+	}
+
+	public static File getFileInExistingDir(String fn) throws IOException {
+		File f = new File(fn);
+		if (!f.getParentFile().isDirectory()) {
+			throw new IOException("Directory not found: "
+					+ f.getParentFile().getAbsolutePath());
+		}
+		return f;
+	}
+
+}



Mime
View raw message