lucenenet-commits mailing list archives

From dougs...@apache.org
Subject svn commit: r798995 [3/35] - in /incubator/lucene.net/trunk/C#/src: Lucene.Net/ Lucene.Net/Analysis/ Lucene.Net/Analysis/Standard/ Lucene.Net/Document/ Lucene.Net/Index/ Lucene.Net/QueryParser/ Lucene.Net/Search/ Lucene.Net/Search/Function/ Lucene.Net/...
Date Wed, 29 Jul 2009 18:04:24 GMT
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Document.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs Wed Jul 29 18:04:12 2009
@@ -35,10 +35,9 @@
 	/// 
 	/// <p>Note that fields which are <i>not</i> {@link Fieldable#IsStored() stored} are
 	/// <i>not</i> available in documents retrieved from the index, e.g. with {@link
-	/// Hits#Doc(int)}, {@link Searcher#Doc(int)} or {@link
+	/// ScoreDoc#Doc(int)}, {@link Searcher#Doc(int)} or {@link
 	/// IndexReader#Document(int)}.
 	/// </summary>
-	
 	[Serializable]
 	public sealed class Document
 	{
@@ -53,7 +52,7 @@
 				this.enclosingInstance = enclosingInstance;
 				iter = Enclosing_Instance.fields.GetEnumerator();
 			}
-			private System.Object tempAuxObj;
+			private object tempAuxObj;
 			public bool MoveNext()
 			{
                 bool result = HasMoreElements();
@@ -67,7 +66,7 @@
 			{
 				tempAuxObj = null;
 			}
-			public System.Object Current
+			public object Current
 			{
 				get
 				{
@@ -89,7 +88,7 @@
 			{
 				return iter.MoveNext();
 			}
-            public System.Object NextElement()
+            public object NextElement()
 			{
 				return iter.Current;
 			}
@@ -261,23 +260,24 @@
 		
 		/// <summary>Returns a List of all the fields in a document.
 		/// <p>Note that fields which are <i>not</i> {@link Fieldable#IsStored() stored} are
-		/// <i>not</i> available in documents retrieved from the index, e.g. with {@link
-		/// Hits#Doc(int)}, {@link Searcher#Doc(int)} or {@link IndexReader#Document(int)}.
+		/// <i>not</i> available in documents retrieved from the index, e.g. 
+		/// {@link Searcher#Doc(int)} or {@link IndexReader#Document(int)}.
 		/// </summary>
 		public System.Collections.IList GetFields()
 		{
 			return fields;
 		}
-		
+
+        private static readonly Field[] NO_FIELDS = new Field[0];
+
 		/// <summary> Returns an array of {@link Field}s with the given name.
-		/// This method can return <code>null</code>.
 		/// Do not use with lazy loaded fields.
-		/// 
+		/// This method returns an empty array when there are no
+        /// matching fields.  It never returns null.
 		/// </summary>
 		/// <param name="name">the name of the field
 		/// </param>
-		/// <returns> a <code>Field[]</code> array
-		/// </returns>
+		/// <returns> a <code>Field[]</code> array</returns>
 		public Field[] GetFields(System.String name)
 		{
 			System.Collections.ArrayList result = new System.Collections.ArrayList();
@@ -291,20 +291,21 @@
 			}
 			
 			if (result.Count == 0)
-				return null;
+				return NO_FIELDS;
 			
 			return (Field[]) result.ToArray(typeof(Field));
 		}
+
+        private static readonly Fieldable[] NO_FIELDABLES = new Fieldable[0];
 		
 		
 		/// <summary> Returns an array of {@link Fieldable}s with the given name.
-		/// This method can return <code>null</code>.
-		/// 
+		/// This method returns an empty array when there are no
+        /// matching fields.  It never returns null.
 		/// </summary>
 		/// <param name="name">the name of the field
 		/// </param>
-		/// <returns> a <code>Fieldable[]</code> array or <code>null</code>
-		/// </returns>
+		/// <returns> a <code>Fieldable[]</code> array</returns>
 		public Fieldable[] GetFieldables(System.String name)
 		{
 			System.Collections.ArrayList result = new System.Collections.ArrayList();
@@ -318,20 +319,20 @@
 			}
 			
 			if (result.Count == 0)
-				return null;
+				return NO_FIELDABLES;
 			
 			return (Fieldable[]) result.ToArray(typeof(Field));
 		}
-		
+
+        private static readonly string[] NO_STRINGS = new string[0];
 		
 		/// <summary> Returns an array of values of the field specified as the method parameter.
-		/// This method can return <code>null</code>.
-		/// 
-		/// </summary>
+        /// This method returns an empty array when there are no
+        /// matching fields.  It never returns null.
+        /// </summary>
 		/// <param name="name">the name of the field
 		/// </param>
-		/// <returns> a <code>String[]</code> of field values or <code>null</code>
-		/// </returns>
+		/// <returns> a <code>String[]</code> of field values</returns>
 		public System.String[] GetValues(System.String name)
 		{
 			System.Collections.ArrayList result = new System.Collections.ArrayList();
@@ -341,22 +342,26 @@
 				if (field.Name().Equals(name) && (!field.IsBinary()))
 					result.Add(field.StringValue());
 			}
-			
+
 			if (result.Count == 0)
-				return null;
+				return NO_STRINGS;
 			
 			return (System.String[]) (result.ToArray(typeof(System.String)));
 		}
-		
-		/// <summary> Returns an array of byte arrays for of the fields that have the name specified
-		/// as the method parameter. This method will return <code>null</code> if no
-		/// binary fields with the specified name are available.
-		/// 
-		/// </summary>
+
+        private static readonly byte[][] NO_BYTES = new byte[0][];
+
+        /// <summary>
+        /// Returns an array of byte arrays for the fields that have the name
+        /// specified as the method parameter.
+        /// This method returns an empty array when there are no
+        /// matching fields.  It never returns null.
+        /// </summary>
 		/// <param name="name">the name of the field
 		/// </param>
-		/// <returns> a  <code>byte[][]</code> of binary field values or <code>null</code>
-		/// </returns>
+		/// <returns> a  <code>byte[][]</code> of binary field values</returns>
 		public byte[][] GetBinaryValues(System.String name)
 		{
 			System.Collections.IList result = new System.Collections.ArrayList();
@@ -375,13 +380,13 @@
 			}
 			
 			if (result.Count == 0)
-				return null;
+				return NO_BYTES;
 			
             System.Collections.ICollection c = result;
-            System.Object[] objects = new byte[result.Count][];
+            object[] objects = new byte[result.Count][];
 
             System.Type type = objects.GetType().GetElementType();
-            System.Object[] objs = (System.Object[]) Array.CreateInstance(type, c.Count );
+            object[] objs = (object[]) Array.CreateInstance(type, c.Count );
 
             System.Collections.IEnumerator e = c.GetEnumerator();
             int ii = 0;
@@ -395,7 +400,7 @@
 
             return (byte[][]) objs;
         }
-		
+
 		/// <summary> Returns an array of bytes for the first (or only) field that has the name
 		/// specified as the method parameter. This method will return <code>null</code>
 		/// if no binary fields with the specified name are available.

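The hunks above change GetFields(String), GetFieldables(String), GetValues(String) and GetBinaryValues(String) to return shared empty arrays instead of null when no field matches. A minimal usage sketch of the new contract (the "title" and "missing" field names, and the surrounding program, are hypothetical):

    Document doc = new Document();
    doc.Add(new Field("title", "Lucene in Action", Field.Store.YES, Field.Index.ANALYZED));

    // No null check is needed any more; with no matches the loop body simply never runs.
    System.String[] values = doc.GetValues("missing");
    foreach (System.String v in values)
        System.Console.WriteLine(v);

    Field[] fields = doc.GetFields("title");    // one match
    System.Console.WriteLine(fields.Length);    // prints 1
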
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Field.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs Wed Jul 29 18:04:12 2009
@@ -75,29 +75,51 @@
 			/// </summary>
 			public static readonly Index NO = new Index("NO");
 			
-			/// <summary>Index the field's value so it can be searched. An Analyzer will be used
-			/// to tokenize and possibly further normalize the text before its
-			/// terms will be stored in the index. This is useful for common text.
+			/// <summary>
+            /// Index the tokens produced by running the field's value through
+            /// an Analyzer. This is useful for common text.
 			/// </summary>
-			public static readonly Index TOKENIZED = new Index("TOKENIZED");
+            public static readonly Index ANALYZED = new Index("ANALYZED");
+            /// <summary>
+            /// Deprecated.  This has been renamed to ANALYZED.
+            /// </summary>
+            public static readonly Index TOKENIZED = ANALYZED;
 			
 			/// <summary>Index the field's value without using an Analyzer, so it can be searched.
 			/// As no analyzer is used the value will be stored as a single term. This is
 			/// useful for unique Ids like product numbers.
 			/// </summary>
-			public static readonly Index UN_TOKENIZED = new Index("UN_TOKENIZED");
-			
-			/// <summary>Index the field's value without an Analyzer, and disable
-			/// the storing of norms.  No norms means that index-time boosting
-			/// and field length normalization will be disabled.  The benefit is
-			/// less memory usage as norms take up one byte per indexed field
-			/// for every document in the index.
-			/// Note that once you index a given field <i>with</i> norms enabled,
-			/// disabling norms will have no effect.  In other words, for NO_NORMS
-			/// to have the above described effect on a field, all instances of that
-			/// field must be indexed with NO_NORMS from the beginning.
-			/// </summary>
-			public static readonly Index NO_NORMS = new Index("NO_NORMS");
+            public static readonly Index NOT_ANALYZED = new Index("NOT_ANALYZED");
+            /// <summary>
+            /// Deprecated.  This has been renamed to NOT_ANALYZED.
+            /// </summary>
+            public static readonly Index UN_TOKENIZED = NOT_ANALYZED;
+
+            /// <summary>
+            /// Expert: Index the field's value without an Analyzer, and also
+            /// disable the storing of norms.  Note that you can also separately
+            /// enable/disable norms by calling SetOmitNorms(bool).  No norms means
+            /// that index-time field and document boosting and field length normalization
+            /// are disabled.  The benefit is less memory usage as norms take up one byte
+            /// of RAM per indexed field for every document in the index, during searching.
+            /// Note that once you index a given field <i>with</i> norms enabled, disabling
+            /// norms will have no effect.  In other words, for this to have the above
+            /// described effect on a field, all instances of that field must be indexed
+            /// with NOT_ANALYZED_NO_NORMS from the beginning.
+            /// </summary>
+            public static readonly Index NOT_ANALYZED_NO_NORMS = new Index("NOT_ANALYZED_NO_NORMS");
+            /// <summary>
+            /// Deprecated.  This has been renamed to NOT_ANALYZED_NO_NORMS.
+            /// </summary>
+            public static readonly Index NO_NORMS = NOT_ANALYZED_NO_NORMS;
+
+            /// <summary>
+            /// Expert: Index the tokens produced by running the field's value 
+            /// through an Analyzer, and also separately disable the storing of norms.
+            /// See {@link #NOT_ANALYZED_NO_NORMS} for what norms are and why you
+            /// may want to disable them.
+            /// </summary>
+            public static readonly Index ANALYZED_NO_NORMS = new Index("ANALYZED_NO_NORMS");
 		}
 		
 		/// <summary>Specifies whether and how a field should have term vectors. </summary>
@@ -146,7 +168,7 @@
 		
 		/// <summary>The value of the field as a String, or null.  If null, the Reader value,
 		/// binary value, or TokenStream value is used.  Exactly one of stringValue(), 
-		/// readerValue(), binaryValue(), and tokenStreamValue() must be set. 
+		/// readerValue(), GetBinaryValue(), and tokenStreamValue() must be set. 
 		/// </summary>
 		public override System.String StringValue()
 		{
@@ -155,7 +177,7 @@
 		
 		/// <summary>The value of the field as a Reader, or null.  If null, the String value,
 		/// binary value, or TokenStream value is used.  Exactly one of stringValue(), 
-		/// readerValue(), binaryValue(), and tokenStreamValue() must be set. 
+		/// readerValue(), GetBinaryValue(), and tokenStreamValue() must be set. 
 		/// </summary>
 		public override System.IO.TextReader ReaderValue()
 		{
@@ -164,23 +186,25 @@
 		
 		/// <summary>The value of the field in Binary, or null.  If null, the Reader value,
 		/// String value, or TokenStream value is used. Exactly one of stringValue(), 
-		/// readerValue(), binaryValue(), and tokenStreamValue() must be set. 
+		/// readerValue(), GetBinaryValue(), and tokenStreamValue() must be set. 
 		/// </summary>
-		public override byte[] BinaryValue()
+        [Obsolete(
+            "This method must allocate a new byte[] if the AbstractField.GetBinaryOffset() is non-zero or AbstractField.GetBinaryLength() is not the full length of the byte[]. Please use AbstractField.GetBinaryValue() instead, which simply returns the byte[]."
+            )]
+        public override byte[] BinaryValue()
 		{
 			return fieldsData is byte[] ? (byte[]) fieldsData : null;
 		}
 		
 		/// <summary>The value of the field as a TokenStream, or null.  If null, the Reader value, 
 		/// String value, or binary value is used. Exactly one of stringValue(), 
-		/// readerValue(), binaryValue(), and tokenStreamValue() must be set. 
+		/// readerValue(), GetBinaryValue(), and tokenStreamValue() must be set. 
 		/// </summary>
 		public override TokenStream TokenStreamValue()
 		{
 			return fieldsData is TokenStream ? (TokenStream) fieldsData : null;
 		}
 		
-		
 		/// <summary><p>Expert: change the value of this field.  This can
 		/// be used during indexing to re-use a single Field
 		/// instance to improve indexing speed by avoiding GC cost
@@ -211,9 +235,19 @@
 		public void  SetValue(byte[] value_Renamed)
 		{
 			fieldsData = value_Renamed;
-		}
-		
-		/// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+            binaryOffset = 0;
+            binaryLength = value_Renamed.Length;
+        }
+
+        /// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+        public void SetValue(byte[] value_Renamed, int offset, int length)
+        {
+            fieldsData = value_Renamed;
+            binaryOffset = offset;
+            binaryLength = length;
+        }
+
+        /// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
 		public void  SetValue(TokenStream value_Renamed)
 		{
 			fieldsData = value_Renamed;
@@ -301,23 +335,29 @@
 				this.isIndexed = false;
 				this.isTokenized = false;
 			}
-			else if (index == Index.TOKENIZED)
+			else if (index == Index.ANALYZED)
 			{
 				this.isIndexed = true;
 				this.isTokenized = true;
 			}
-			else if (index == Index.UN_TOKENIZED)
+			else if (index == Index.NOT_ANALYZED)
 			{
 				this.isIndexed = true;
 				this.isTokenized = false;
 			}
-			else if (index == Index.NO_NORMS)
+            else if (index == Index.NOT_ANALYZED_NO_NORMS)
 			{
 				this.isIndexed = true;
 				this.isTokenized = false;
 				this.omitNorms = true;
 			}
-			else
+            else if (index == Index.ANALYZED_NO_NORMS)
+            {
+                this.isIndexed = true;
+                this.isTokenized = true;
+                this.omitNorms = true;
+            }
+            else
 			{
 				throw new System.ArgumentException("unknown index parameter " + index);
 			}
@@ -428,35 +468,46 @@
 		}
 		
 		
-		/// <summary> Create a stored field with binary value. Optionally the value may be compressed.
-		/// 
+		/// <summary>
+        /// Create a stored field with binary value. Optionally the value may be compressed.
 		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="value">The binary value
-		/// </param>
-		/// <param name="store">How <code>value</code> should be stored (compressed or not)
-		/// </param>
-		/// <throws>  IllegalArgumentException if store is <code>Store.NO</code>  </throws>
+		/// <param name="name">The name of the field</param>
+		/// <param name="value">The binary value</param>
+		/// <param name="store">How <code>value</code> should be stored (compressed or not)</param>
+		/// <throws>System.ArgumentException if store is <code>Store.NO</code></throws>
 		public Field(System.String name, byte[] value_Renamed, Store store)
+            : this(name, value_Renamed, 0, value_Renamed.Length, store)
 		{
+        }
+
+		/// <summary>
+        /// Create a stored field with binary value. Optionally the value may be compressed.
+		/// </summary>
+		/// <param name="name">The name of the field</param>
+		/// <param name="value">The binary value</param>
+        /// <param name="offset">the offset into <code>value</code> where the binary content begins</param>
+        /// <param name="length">the number of bytes beginning at <code>offset</code> that make up the value</param>
+		/// <param name="store">How <code>value</code> should be stored (compressed or not)</param>
+		/// <throws>System.ArgumentException if store is <code>Store.NO</code></throws>
+		public Field(System.String name, byte[] value_Renamed, int offset, int length, Store store)
+        {
 			if (name == null)
 				throw new System.ArgumentException("name cannot be null");
 			if (value_Renamed == null)
 				throw new System.ArgumentException("value cannot be null");
 			
 			this.name = String.Intern(name);
-			this.fieldsData = value_Renamed;
+			fieldsData = value_Renamed;
 			
 			if (store == Store.YES)
 			{
-				this.isStored = true;
-				this.isCompressed = false;
+				isStored = true;
+				isCompressed = false;
 			}
 			else if (store == Store.COMPRESS)
 			{
-				this.isStored = true;
-				this.isCompressed = true;
+				isStored = true;
+				isCompressed = true;
 			}
 			else if (store == Store.NO)
 				throw new System.ArgumentException("binary values can't be unstored");
@@ -465,11 +516,13 @@
 				throw new System.ArgumentException("unknown store parameter " + store);
 			}
 			
-			this.isIndexed = false;
-			this.isTokenized = false;
-			
-			this.isBinary = true;
+			isIndexed = false;
+			isTokenized = false;
 			
+			isBinary = true;
+            binaryLength = length;
+            binaryOffset = offset;
+
 			SetStoreTermVector(TermVector.NO);
 		}
 	}

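The Field.cs changes above rename the Index constants (TOKENIZED becomes ANALYZED, UN_TOKENIZED becomes NOT_ANALYZED, NO_NORMS becomes NOT_ANALYZED_NO_NORMS, and ANALYZED_NO_NORMS is new), keeping the old names as deprecated aliases, and add an offset/length pair to the binary constructor and SetValue overloads. A brief sketch, assuming a caller-owned buffer and a hypothetical "payload" field:

    // Old and new spellings refer to the same Index instance.
    System.Diagnostics.Debug.Assert(Field.Index.TOKENIZED == Field.Index.ANALYZED);

    // Store only bytes 4..9 of the buffer, without copying them out first.
    byte[] buffer = new byte[] { 0, 0, 0, 0, 1, 2, 3, 4, 5, 6 };
    Field binary = new Field("payload", buffer, 4, 6, Field.Store.YES);

    // Re-use the same Field instance with a different slice of the buffer.
    binary.SetValue(buffer, 0, 4);
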
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/FieldSelectorResult.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/FieldSelectorResult.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/FieldSelectorResult.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/FieldSelectorResult.cs Wed Jul 29 18:04:12 2009
@@ -87,7 +87,7 @@
 			this.id = id;
 		}
 		
-		public  override bool Equals(System.Object o)
+		public  override bool Equals(object o)
 		{
 			if (this == o)
 				return true;

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Fieldable.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs Wed Jul 29 18:04:12 2009
@@ -22,10 +22,14 @@
 namespace Lucene.Net.Documents
 {
 	
-	/// <summary> Synonymous with {@link Field}.
-	/// 
-	/// 
-	/// </summary>
+	/// <summary>
+    /// Synonymous with {@link Field}.
+    /// <p><b>WARNING</b>: This interface may change within minor versions, despite Lucene's backward compatibility requirements.
+    /// This means new methods may be added from version to version.  This change only affects the Fieldable API; other backwards
+    /// compatibility promises remain intact. For example, Lucene can still
+    /// read and write indices created within the same major version.
+    /// </p>
+    /// </summary>
 	public interface Fieldable
 	{
 		/// <summary>Sets the boost factor hits on this field.  This value will be
@@ -143,7 +147,19 @@
 		/// This effectively disables indexing boosts and length normalization for this field.
 		/// </summary>
 		void  SetOmitNorms(bool omitNorms);
-		
+
+        /// <summary>
+        /// Expert:
+        /// If set, omit term freq, positions, and payloads from postings for this field.
+        /// </summary>
+        void SetOmitTf(bool omitTf);
+
+        /// <summary>
+        /// True if tf is omitted for this indexed field.
+        /// </summary>
+        /// <returns>true if term freq is omitted for this field</returns>
+        bool GetOmitTf();
+
 		/// <summary> Indicates whether a Field is Lazy or not.  The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving
 		/// it's values via {@link #StringValue()} or {@link #BinaryValue()} is only valid as long as the {@link Lucene.Net.Index.IndexReader} that
 		/// retrieved the {@link Document} is still open.
@@ -152,5 +168,35 @@
 		/// <returns> true if this field can be loaded lazily
 		/// </returns>
 		bool IsLazy();
-	}
+
+        /// <summary>
+        /// Returns the offset into the byte[] segment that is used as the value.  If the field is not binary, the returned value is undefined.
+        /// </summary>
+        /// <returns>index of the first byte in segment that represents this Field value</returns>
+        int GetBinaryOffset();
+
+        /// <summary>
+        /// Returns the length of the byte[] segment that is used as the value.  If the field is not binary, the returned value is undefined.
+        /// </summary>
+        /// <returns>length of byte[] segment that represents this Field value</returns>
+        int GetBinaryLength();
+
+        /// <summary>
+        /// Return the raw byte[] for the binary field.  Note that you must also call GetBinaryLength() and GetBinaryOffset()
+        /// to know which range of bytes in the returned array belong to this Field.
+        /// </summary>
+        /// <returns>reference to the Field value as byte[]</returns>
+        byte[] GetBinaryValue();
+
+        /// <summary>
+        /// Return the raw byte[] for the binary field.  Note that you must also call GetBinaryLength() and GetBinaryOffset()
+        /// to know which range of bytes in the returned array belong to this Field.
+        /// About reuse: if you pass in the result byte[] and it is used, it is likely the underlying implementation
+        /// will hold onto this byte[] and return it in future calls to BinaryValue() or GetBinaryValue().
+        /// So if you subsequently re-use the same byte[] elsewhere it will alter this Fieldable's value.
+        /// </summary>
+        /// <param name="result">user defined buffer that will be used if non-null and large enough to contain the Field value</param>
+        /// <returns>reference to the Field value as byte[]</returns>
+        byte[] GetBinaryValue(byte[] result);
+    }
 }
\ No newline at end of file

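With the binary accessors added to Fieldable above, a stored binary value is a segment of a possibly shared byte[], so readers should honor GetBinaryOffset() and GetBinaryLength() rather than assume the whole array belongs to the field. A hedged sketch (the doc variable and "payload" field name are assumptions):

    Fieldable f = doc.GetFieldable("payload");
    byte[] raw = f.GetBinaryValue();      // may be larger than the value itself
    int off = f.GetBinaryOffset();
    int len = f.GetBinaryLength();

    // Copy out exactly this field's bytes.
    byte[] copy = new byte[len];
    System.Array.Copy(raw, off, copy, 0, len);
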
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/MapFieldSelector.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/MapFieldSelector.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/MapFieldSelector.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/MapFieldSelector.cs Wed Jul 29 18:04:12 2009
@@ -23,8 +23,6 @@
 	/// <summary> A FieldSelector based on a Map of field names to FieldSelectorResults
 	/// 
 	/// </summary>
-	/// <author>  Chuck Williams
-	/// </author>
 	[Serializable]
 	public class MapFieldSelector : FieldSelector
 	{

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/BufferedDeletes.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/BufferedDeletes.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/BufferedDeletes.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/BufferedDeletes.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System.Collections.Generic;
+
+namespace Lucene.Net.Index
+{
+    /// <summary>
+    /// Holds buffered deletes, by docID, term or query.  We
+    /// hold two instances of this class: one for the deletes
+    /// prior to the last flush, the other for deletes after
+    /// the last flush.  This is so if we need to abort
+    /// (discard all buffered docs) we can also discard the
+    /// buffered deletes yet keep the deletes done during
+    /// previously flushed segments.
+    /// </summary>
+    internal class BufferedDeletes
+    {
+        internal int numTerms;
+        internal IDictionary<object, object> terms = new Dictionary<object, object>();
+        internal IDictionary<object, object> queries = new Dictionary<object, object>();
+        internal IList<object> docIDs = new List<object>();
+
+        // Number of documents a delete term applies to.
+        internal sealed class Num
+        {
+            private int num;
+
+            internal Num(int num)
+            {
+                this.num = num;
+            }
+
+            internal int GetNum()
+            {
+                return num;
+            }
+
+            internal void SetNum(int num)
+            {
+                // Only record the new number if it's greater than the
+                // current one.  This is important because if multiple
+                // threads are replacing the same doc at nearly the
+                // same time, it's possible that one thread that got a
+                // higher docID is scheduled before the other
+                // threads.
+                if (num > this.num)
+                    this.num = num;
+            }
+        }
+
+        internal void Update(BufferedDeletes in_Renamed)
+        {
+            numTerms += in_Renamed.numTerms;
+            SupportClass.CollectionsSupport.PutAll(in_Renamed.terms, terms);
+            SupportClass.CollectionsSupport.PutAll(in_Renamed.queries, queries);
+            SupportClass.CollectionsSupport.AddAll(in_Renamed.docIDs, docIDs);
+            in_Renamed.terms.Clear();
+            in_Renamed.numTerms = 0;
+            in_Renamed.queries.Clear();
+            in_Renamed.docIDs.Clear();
+        }
+
+        internal void Clear()
+        {
+            terms.Clear();
+            queries.Clear();
+            docIDs.Clear();
+            numTerms = 0;
+        }
+
+        internal bool Any()
+        {
+            return terms.Count > 0 || docIDs.Count > 0 || queries.Count > 0;
+        }
+
+        // Remaps all buffered deletes based on a completed
+        // merge
+        internal void Remap(MergeDocIDRemapper mapper, SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
+        {
+            lock (this)
+            {
+                IDictionary<object, object> newDeleteTerms;
+
+                // Remap delete-by-term
+                if (terms.Count > 0)
+                {
+                    newDeleteTerms = new Dictionary<object, object>();
+                    IEnumerator<KeyValuePair<object, object>> iter = terms.GetEnumerator();
+                    while (iter.MoveNext())
+                    {
+                        KeyValuePair<object, object> entry = (KeyValuePair<object, object>)iter.Current;
+                        Num num = (Num)entry.Value;
+                        newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
+                    }
+                }
+                else
+                    newDeleteTerms = null;
+
+                // Remap delete-by-docID
+                IList<object> newDeleteDocIDs;
+
+                if (docIDs.Count > 0)
+                {
+                    newDeleteDocIDs = new List<object>(docIDs.Count);
+                    IEnumerator<object> iter = docIDs.GetEnumerator();
+                    while (iter.MoveNext())
+                    {
+                        int num = (int)iter.Current;
+                        newDeleteDocIDs.Add(mapper.Remap(num));
+                    }
+                }
+                else
+                    newDeleteDocIDs = null;
+
+                // Remap delete-by-query
+                IDictionary<object, object> newDeleteQueries;
+
+                if (queries.Count > 0)
+                {
+                    newDeleteQueries = new Dictionary<object, object>(queries.Count);
+                    IEnumerator<KeyValuePair<object, object>> iter = queries.GetEnumerator();
+                    while (iter.MoveNext())
+                    {
+                        KeyValuePair<object, object> entry = (KeyValuePair<object, object>)iter.Current;
+                        int num = (int)entry.Value;
+                        newDeleteQueries[entry.Key] = mapper.Remap(num);
+                    }
+                }
+                else
+                    newDeleteQueries = null;
+
+                if (newDeleteTerms != null)
+                    terms = newDeleteTerms;
+                if (newDeleteDocIDs != null)
+                    docIDs = newDeleteDocIDs;
+                if (newDeleteQueries != null)
+                    queries = newDeleteQueries;
+            }
+        }
+
+    }
+}

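The Num.SetNum guard in the new class above keeps the recorded docID monotonic when threads replacing the same document report out of order. A standalone illustration of that rule (not using the internal class itself):

    // Whichever order the updates arrive in, the highest docID wins.
    int recorded = 0;
    foreach (int docID in new int[] { 7, 12, 9 })
        if (docID > recorded)
            recorded = docID;
    System.Console.WriteLine(recorded);    // prints 12
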
Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteBlockPool.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/ByteBlockPool.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteBlockPool.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteBlockPool.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    /// <summary>
+    ///  Class that Posting and PostingVector use to write byte
+    ///  streams into shared fixed-size byte[] arrays.  The idea
+    ///  is to allocate slices of increasing lengths.  For
+    ///  example, the first slice is 5 bytes, the next slice is
+    ///  14, etc.  We start by writing our bytes into the first
+    ///  5 bytes.  When we hit the end of the slice, we allocate
+    ///  the next slice and then write the address of the new
+    ///  slice into the last 4 bytes of the previous slice (the
+    ///  "forwarding address").
+    /// 
+    ///  Each slice is filled with 0's initially, and we mark
+    ///  the end with a non-zero byte.  This way the methods
+    ///  that are writing into the slice don't need to record
+    ///  its length and instead allocate a new slice once they
+    ///  hit a non-zero byte.
+    /// </summary>
+    public sealed class ByteBlockPool
+    {
+        public abstract class Allocator
+        {
+            public abstract void RecycleByteBlocks(byte[][] blocks, int start, int end);
+            public abstract byte[] GetByteBlock(bool trackAllocations);
+        }
+
+
+        public byte[][] buffers = new byte[10][];
+
+        int bufferUpto = -1;                        // Which buffer we are upto
+        public int byteUpto = DocumentsWriter.BYTE_BLOCK_SIZE;             // Where we are in head buffer
+
+        public byte[] buffer;                              // Current head buffer
+        public int byteOffset = -DocumentsWriter.BYTE_BLOCK_SIZE;          // Current head offset
+
+        private readonly bool trackAllocations;
+        private readonly Allocator allocator;
+
+        public ByteBlockPool(Allocator allocator, bool trackAllocations)
+        {
+            this.allocator = allocator;
+            this.trackAllocations = trackAllocations;
+        }
+
+        public void Reset()
+        {
+            if (bufferUpto != -1)
+            {
+                // We allocated at least one buffer
+
+                for (int i = 0; i < bufferUpto; i++)
+                    // Fully zero fill buffers that we fully used
+                    System.Array.Clear(buffers[i], 0, buffers[i].Length);
+
+                // Partially zero-fill the final buffer
+                System.Array.Clear(buffers[bufferUpto], 0, byteUpto);
+
+                if (bufferUpto > 0)
+                    // Recycle all but the first buffer
+                    allocator.RecycleByteBlocks(buffers, 1, 1 + bufferUpto);
+
+                // Re-use the first buffer
+                bufferUpto = 0;
+                byteUpto = 0;
+                byteOffset = 0;
+                buffer = buffers[0];
+            }
+        }
+
+        public void NextBuffer()
+        {
+            if (1 + bufferUpto == buffers.Length)
+            {
+                byte[][] newBuffers = new byte[(int)(buffers.Length * 1.5)][];
+                System.Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                buffers = newBuffers;
+            }
+            buffer = buffers[1 + bufferUpto] = allocator.GetByteBlock(trackAllocations);
+            bufferUpto++;
+
+            byteUpto = 0;
+            byteOffset += DocumentsWriter.BYTE_BLOCK_SIZE;
+        }
+
+        public int NewSlice(int size)
+        {
+            if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE - size)
+                NextBuffer();
+            int upto = byteUpto;
+            byteUpto += size;
+            buffer[byteUpto - 1] = 16;
+            return upto;
+        }
+
+        // Size of each slice.  These arrays should be at most 16
+        // elements (index is encoded with 4 bits).  First array
+        // is just a compact way to encode X+1 with a max.  Second
+        // array is the length of each slice, ie first slice is 5
+        // bytes, next slice is 14 bytes, etc.
+        internal readonly static int[] nextLevelArray = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 9 };
+        internal readonly static int[] levelSizeArray = { 5, 14, 20, 30, 40, 40, 80, 80, 120, 200 };
+        internal readonly static int FIRST_LEVEL_SIZE = levelSizeArray[0];
+        public readonly static int FIRST_LEVEL_SIZE_For_NUnit_Test = levelSizeArray[0];
+
+        public int AllocSlice(byte[] slice, int upto)
+        {
+
+            int level = slice[upto] & 15;
+            int newLevel = nextLevelArray[level];
+            int newSize = levelSizeArray[newLevel];
+
+            // Maybe allocate another block
+            if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE - newSize)
+                NextBuffer();
+
+            int newUpto = byteUpto;
+            int offset = newUpto + byteOffset;
+            byteUpto += newSize;
+
+            // Copy forward the past 3 bytes (which we are about
+            // to overwrite with the forwarding address):
+            buffer[newUpto] = slice[upto - 3];
+            buffer[newUpto + 1] = slice[upto - 2];
+            buffer[newUpto + 2] = slice[upto - 1];
+
+            // Write forwarding address at end of last slice:
+            slice[upto - 3] = (byte)(offset >> 24);
+            slice[upto - 2] = (byte)(offset >> 16);
+            slice[upto - 1] = (byte)(offset >> 8);
+            slice[upto] = (byte)offset;
+
+            // Write new level:
+            buffer[byteUpto - 1] = (byte)(16 | newLevel);
+
+            return newUpto + 3;
+        }
+    }
+}

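The level tables near the end of ByteBlockPool drive how slice sizes grow: each slice's trailing 4-bit level indexes nextLevelArray for the next level and levelSizeArray for its size, and 4 bytes of every non-final slice are later overwritten with the forwarding address. A standalone sketch that walks the tables (values copied from the class above) to show the progression:

    int[] nextLevel = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 9 };
    int[] levelSize = { 5, 14, 20, 30, 40, 40, 80, 80, 120, 200 };

    int level = 0;
    for (int slice = 0; slice < 12; slice++)
    {
        System.Console.WriteLine("slice {0}: {1} bytes", slice, levelSize[level]);
        level = nextLevel[level];    // caps at level 9, i.e. 200-byte slices
    }
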
Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteSliceReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/ByteSliceReader.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteSliceReader.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteSliceReader.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using IndexInput = Lucene.Net.Store.IndexInput;
+using IndexOutput = Lucene.Net.Store.IndexOutput;
+
+namespace Lucene.Net.Index
+{
+    /// <summary>
+    /// IndexInput that knows how to read the byte slices written
+    /// by Posting and PostingVector.  We read the bytes in
+    /// each slice until we hit the end of that slice at which
+    /// point we read the forwarding address of the next slice
+    /// and then jump to it.
+    /// </summary>
+    public sealed class ByteSliceReader : IndexInput
+    {
+        internal ByteBlockPool pool;
+        internal int bufferUpto;
+        internal byte[] buffer;
+        public int upto;
+        internal int limit;
+        internal int level;
+        public int bufferOffset;
+
+        public int endIndex;
+
+        public void Init(ByteBlockPool pool, int startIndex, int endIndex)
+        {
+
+            System.Diagnostics.Debug.Assert(endIndex - startIndex >= 0);
+            System.Diagnostics.Debug.Assert(startIndex >= 0);
+            System.Diagnostics.Debug.Assert(endIndex >= 0);
+
+            this.pool = pool;
+            this.endIndex = endIndex;
+
+            level = 0;
+            bufferUpto = startIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
+            bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+            buffer = pool.buffers[bufferUpto];
+            upto = startIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+
+            int firstSize = ByteBlockPool.levelSizeArray[0];
+
+            if (startIndex + firstSize >= endIndex)
+            {
+                // There is only this one slice to read
+                limit = endIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+            }
+            else
+                limit = upto + firstSize - 4;
+        }
+
+        public bool Eof()
+        {
+            System.Diagnostics.Debug.Assert(upto + bufferOffset <= endIndex);
+            return upto + bufferOffset == endIndex;
+        }
+
+        public override byte ReadByte()
+        {
+            System.Diagnostics.Debug.Assert(!Eof());
+            System.Diagnostics.Debug.Assert(upto <= limit);
+            if (upto == limit)
+                NextSlice();
+            return buffer[upto++];
+        }
+
+        public long WriteTo(IndexOutput out_Renamed)
+        {
+            long size = 0;
+            while (true)
+            {
+                if (limit + bufferOffset == endIndex)
+                {
+                    System.Diagnostics.Debug.Assert(endIndex - bufferOffset >= upto);
+                    out_Renamed.WriteBytes(buffer, upto, limit - upto);
+                    size += limit - upto;
+                    break;
+                }
+                else
+                {
+                    out_Renamed.WriteBytes(buffer, upto, limit - upto);
+                    size += limit - upto;
+                    NextSlice();
+                }
+            }
+
+            return size;
+        }
+
+        public void NextSlice()
+        {
+
+            // Skip to our next slice
+            int nextIndex = ((buffer[limit] & 0xff) << 24) + ((buffer[1 + limit] & 0xff) << 16) + ((buffer[2 + limit] & 0xff) << 8) + (buffer[3 + limit] & 0xff);
+
+            level = ByteBlockPool.nextLevelArray[level];
+            int newSize = ByteBlockPool.levelSizeArray[level];
+
+            bufferUpto = nextIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
+            bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+
+            buffer = pool.buffers[bufferUpto];
+            upto = nextIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+
+            if (nextIndex + newSize >= endIndex)
+            {
+                // We are advancing to the final slice
+                System.Diagnostics.Debug.Assert(endIndex - nextIndex > 0);
+                limit = endIndex - bufferOffset;
+            }
+            else
+            {
+                // This is not the final slice (subtract 4 for the
+                // forwarding address at the end of this new slice)
+                limit = upto + newSize - 4;
+            }
+        }
+
+        public override void ReadBytes(byte[] b, int offset, int len)
+        {
+            while (len > 0)
+            {
+                int numLeft = limit - upto;
+                if (numLeft < len)
+                {
+                    // Read entire slice
+                    System.Array.Copy(buffer, upto, b, offset, numLeft);
+                    offset += numLeft;
+                    len -= numLeft;
+                    NextSlice();
+                }
+                else
+                {
+                    // This slice is the last one
+                    System.Array.Copy(buffer, upto, b, offset, len);
+                    upto += len;
+                    break;
+                }
+            }
+        }
+
+        public override long GetFilePointer() { throw new System.SystemException("not implemented"); }
+        public override long Length() { throw new System.SystemException("not implemented"); }
+        public override void Seek(long pos) { throw new System.SystemException("not implemented"); }
+        public override void Close() { throw new System.SystemException("not implemented"); }
+    }
+}

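NextSlice above recovers the forwarding address that ByteBlockPool.AllocSlice wrote into the last 4 bytes of the previous slice; the encoding is plain big-endian. A standalone round-trip sketch of just that encoding:

    // Encode an address as AllocSlice does, then decode it as NextSlice does.
    int offset = 0x00ABCDEF;
    byte[] b = new byte[4];
    b[0] = (byte)(offset >> 24);
    b[1] = (byte)(offset >> 16);
    b[2] = (byte)(offset >> 8);
    b[3] = (byte)offset;

    int decoded = ((b[0] & 0xff) << 24) + ((b[1] & 0xff) << 16)
                + ((b[2] & 0xff) << 8) + (b[3] & 0xff);
    System.Diagnostics.Debug.Assert(decoded == offset);
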
Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteSliceWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/ByteSliceWriter.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteSliceWriter.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ByteSliceWriter.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    /// <summary>
+    /// Class to write byte streams into slices of shared
+    /// byte[].  This is used by DocumentsWriter to hold the
+    /// posting list for many terms in RAM.
+    /// </summary>
+    public sealed class ByteSliceWriter
+    {
+        private byte[] slice;
+        private int upto;
+        private readonly ByteBlockPool pool;
+
+        int offset0;
+
+        public ByteSliceWriter(ByteBlockPool pool)
+        {
+            this.pool = pool;
+        }
+
+        /// <summary>
+        /// Set up the writer to write at address.
+        /// </summary>
+        /// <param name="address"></param>
+        public void Init(int address)
+        {
+            slice = pool.buffers[address >> DocumentsWriter.BYTE_BLOCK_SHIFT];
+            System.Diagnostics.Debug.Assert(slice != null);
+            upto = address & DocumentsWriter.BYTE_BLOCK_MASK;
+            offset0 = address;
+            System.Diagnostics.Debug.Assert(upto < slice.Length);
+        }
+
+        /// <summary> 
+        /// Write byte into byte slice stream
+        /// </summary>
+        public void WriteByte(byte b)
+        {
+            System.Diagnostics.Debug.Assert(slice != null);
+            if (slice[upto] != 0)
+            {
+                upto = pool.AllocSlice(slice, upto);
+                slice = pool.buffer;
+                offset0 = pool.byteOffset;
+                System.Diagnostics.Debug.Assert(slice != null);
+            }
+            slice[upto++] = b;
+            System.Diagnostics.Debug.Assert(upto != slice.Length);
+        }
+
+        public void WriteBytes(byte[] b, int offset, int len)
+        {
+            int offsetEnd = offset + len;
+            while (offset < offsetEnd)
+            {
+                if (slice[upto] != 0)
+                {
+                    // End marker
+                    upto = pool.AllocSlice(slice, upto);
+                    slice = pool.buffer;
+                    offset0 = pool.byteOffset;
+                }
+
+                slice[upto++] = b[offset++];
+                System.Diagnostics.Debug.Assert(upto != slice.Length);
+            }
+        }
+
+        public int GetAddress()
+        {
+            return upto + (offset0 & DocumentsWriter.BYTE_BLOCK_NOT_MASK);
+        }
+
+        public void WriteVInt(int i)
+        {
+            while ((i & ~0x7F) != 0)
+            {
+                WriteByte((byte)((i & 0x7f) | 0x80));
+                i = (int)((uint)i >> 7);
+            }
+            WriteByte((byte)i);
+        }
+    }
+}

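WriteVInt above emits the usual Lucene variable-length int: 7 data bits per byte, low-order bits first, with the high bit set on every byte except the last. A sketch of the matching decoder (readers normally get this via IndexInput.ReadVInt(); this standalone version is only for illustration):

    static int ReadVInt(byte[] bytes, ref int pos)
    {
        byte b = bytes[pos++];
        int i = b & 0x7F;
        for (int shift = 7; (b & 0x80) != 0; shift += 7)
        {
            b = bytes[pos++];
            i |= (b & 0x7F) << shift;
        }
        return i;
    }
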
Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CharBlockPool.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/CharBlockPool.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CharBlockPool.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CharBlockPool.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    sealed class CharBlockPool
+    {
+        public char[][] buffers = new char[10][];
+
+        int bufferUpto = -1;                        // Which buffer we are upto
+        public int charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;             // Where we are in head buffer
+
+        public char[] buffer;                              // Current head buffer
+        public int charOffset = -DocumentsWriter.CHAR_BLOCK_SIZE;          // Current head offset
+        private readonly DocumentsWriter docWriter;
+
+        public CharBlockPool(DocumentsWriter docWriter)
+        {
+            this.docWriter = docWriter;
+        }
+
+        public void reset()
+        {
+            docWriter.RecycleCharBlocks(buffers, 1 + bufferUpto);
+            bufferUpto = -1;
+            charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
+            charOffset = -DocumentsWriter.CHAR_BLOCK_SIZE;
+        }
+
+        public void nextBuffer()
+        {
+            if (1 + bufferUpto == buffers.Length)
+            {
+                char[][] newBuffers = new char[(int)(buffers.Length * 1.5)][];
+                System.Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                buffers = newBuffers;
+            }
+            buffer = buffers[1 + bufferUpto] = docWriter.GetCharBlock();
+            bufferUpto++;
+
+            charUpto = 0;
+            charOffset += DocumentsWriter.CHAR_BLOCK_SIZE;
+        }
+    }
+}


