incubator-cvs mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From s...@apache.org
Subject svn commit: r1348870 - /incubator/public/trunk/clutch.py
Date Mon, 11 Jun 2012 14:35:34 GMT
Author: sebb
Date: Mon Jun 11 14:35:33 2012
New Revision: 1348870

URL: http://svn.apache.org/viewvc?rev=1348870&view=rev
Log:
Add comments to appear in outline for easier navigation

Modified:
    incubator/public/trunk/clutch.py

Modified: incubator/public/trunk/clutch.py
URL: http://svn.apache.org/viewvc/incubator/public/trunk/clutch.py?rev=1348870&r1=1348869&r2=1348870&view=diff
==============================================================================
--- incubator/public/trunk/clutch.py (original)
+++ incubator/public/trunk/clutch.py Mon Jun 11 14:35:33 2012
@@ -185,6 +185,7 @@ statusTallyDate2 = gatherDate - delta
 delta = datetime.timedelta(days=273)
 statusTallyDate3 = gatherDate - delta
 
+# Regular expressions ---
 # These expressions are used often, so compile them early.
 commentsRE = re.compile("(.*) *\(([^)]+)\)")
 startDateRE = re.compile("([0-9]+)-0?([0-9]+)-?0?([0-9]+)?")
@@ -203,12 +204,14 @@ releasesBadNameRE = re.compile("^([-a-z0
 linkReportingSchedule = '<a href="http://wiki.apache.org/incubator/ReportingSchedule">ReportingSchedule</a>'
 linkIncubationTable = '<a href="projects/">IncubationTable</a>'
 
-# Import the persistent data.
+# Import the persistent data ---
 # This enables us to skip detection of website etc. if already detected.
 inputFile = open('clutch.pkl', 'rb')
 state = pickle.load(inputFile)
 inputFile.close()
 
+# Gather data from the ReportingSchedule ---
+
 print("Gather data from the ReportingSchedule ...") 
 # Parse the wiki ReportingSchedule to gather project details
 text = getUrl(REPORTING_SCHEDULE).read().decode('utf-8')
@@ -300,7 +303,7 @@ for k in sorted(projectNames, key=str.lo
 
 projectsTable = {} # internal data from podlings.xml
 
-# Parse the podlings data file
+# Parse the podlings data file ---
 dom = xml.dom.minidom.parse(SITE_CONTENT_DIR+"podlings.xml")
 
 graduatedProjects = {}
@@ -369,7 +372,8 @@ for row in dom.getElementsByTagName("pod
 
 dom.unlink()
 
-# Process the incubation table data, detect some potential issues.
+# Process the incubation table data, detect some potential issues. ---
+
 print("Gather details from project status files ...")
 projectNames = list(projectsTable.keys())
 for k in sorted(projectNames, key=str.lower):
@@ -466,6 +470,8 @@ for k in sorted(projectNames, key=str.lo
 
 # end of processing incubation table data
 
+# Gather committers data ---
+
 print("Gather committers data ...")
 # Using the generated p.a.o/committers-by-project.html page is the easiest way.
 # However it has a very flat structure.
@@ -516,6 +522,8 @@ committers.feed(dataCommitters)
 committers.close()
 #pprint.pprint(committers.projects)
 
+# Gather incubator group mail list data ---
+
 print("Gather incubator group mail list data ...")
 class IncubatorMailListNamesParser(HTMLParser):
 
@@ -573,6 +581,8 @@ if optionVerbose:
   print("DEBUG: projectMailLists")
   pprint.pprint(projectMailLists)
 
+# Gather incubator PGP keys data ---
+
 print("Gather incubator PGP keys data ...")
 keysNamesRE = re.compile("/dist/incubator/([^/]+)/(.*)")
 keysList = {}
@@ -582,6 +592,8 @@ for line in lines:
   if matchKey:
     keysList[matchKey.group(1)] = "{0}/{1}/{2}".format("http://www.apache.org/dist/incubator",
matchKey.group(1), matchKey.group(2))
 
+# Gather data about releases ---
+
 print("Gather data about releases ...")
 releases = {}
 lines = getUrl(INCUBATOR_RELEASES).readlines()
@@ -624,6 +636,8 @@ for k in releasesBadName:
     errorMsg += ". See <a href=\"#h-Graduate\">help</a>."
     otherIssues.append(errorMsg)
 
+# Processing the gathered data ---
+
 print("Processing ...")
 # Process the reporting schedule data, correlate and ensure each exists in the
 # incubation projects summary table, add more details to the data store.
@@ -704,6 +718,7 @@ for k in sorted(projectNames, key=str.lo
 
 # end of processing
 
+# Collect SVN directory names ---
 
 print("Collect SVN directory names")
 incubatorSvnDirs = {} # top-level SVN incubator dirs
@@ -727,6 +742,8 @@ for entry in dom.getElementsByTagName("e
     if name not in ('trunk', 'public'): # skip non-podling entries
       incubatorSvnDirs["http://svn.apache.org/repos/asf/incubator/{0}/".format(name)] = True
 
+# Detect certain resources ---
+
 print("Detect certain resources ...")
 for k in sorted(projectNames, key=str.lower):
   print(k)
@@ -948,6 +965,8 @@ if optionInfo:
     if incubatorSvnDirs[entry] == True and entry in graduatedProjects:
       print("INFO: graduated project has SVN directory " + entry)
 
+# Output data files ---
+
 print("Output the data ...")
 reportingGroups = {'month': 'Monthly',
     'group-1': 'January,April,July,October',



---------------------------------------------------------------------
To unsubscribe, e-mail: cvs-unsubscribe@incubator.apache.org
For additional commands, e-mail: cvs-help@incubator.apache.org


Mime
View raw message