Tim Golden added the comment:
The attached patch against r59286 tries to tease apart the uses of
filename by adding "webify_filepath" and "unwebify_filepath" functions
to sphinx.util, which are then used throughout the app to convert from
filesystem-separated paths to web-separated paths and back. A WEB_SEP
constant has been defined, mainly to make its purpose clear: a bare "/"
in the code might otherwise look like a hangover from the code's *nix
origins.
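In essence the helpers are a pair of inverse substitutions. Here is a
simplified sketch of what the patch adds to sphinx.util, together with
the usage pattern the builders follow (the example paths are made up;
the real code is in the attached patch, reproduced below):

    from os import path

    WEB_SEP = "/"   # separator for all paths stored in the environment

    def webify_filepath(filepath):
        # filesystem path -> web path,
        # e.g. "whatsnew\2.6.rst" -> "whatsnew/2.6.rst" on Windows
        return filepath.replace(path.sep, WEB_SEP)

    def unwebify_filepath(webpath):
        # web path -> filesystem path; the inverse of webify_filepath
        return webpath.replace(WEB_SEP, path.sep)

    # Typical builder usage: environment filenames are always
    # web-separated and are unwebified just before touching the disk.
    filename = "whatsnew/2.6.rst"      # as stored in the environment
    source = path.join(r"c:\src\Doc", unwebify_filepath(filename))
    # on Windows: c:\src\Doc\whatsnew\2.6.rst
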
The result has been tested by building manually, i.e. by invoking
python tools\sphinx-build.py with appropriate parameters, under native
Win32 only. The html, web and htmlhelp versions all style up and
function correctly. It would obviously benefit from testing under
Cygwin and *nix. I'm particularly keen to see whether it works on a
filesystem which doesn't allow "/" as a separator.
Added file: http://bugs.python.org/file8860/sphinx-r59286.patch
__________________________________
Tracker <[EMAIL PROTECTED]>
<http://bugs.python.org/issue1520>
__________________________________
Index: builder.py
===================================================================
--- builder.py (revision 59286)
+++ builder.py (working copy)
@@ -27,7 +27,8 @@
from docutils.frontend import OptionParser
from .util import (get_matching_files, attrdict, status_iterator,
- ensuredir, get_category, relative_uri)
+ ensuredir, get_category, relative_uri, webify_filepath,
+ unwebify_filepath)
from .writer import HTMLWriter
from .util.console import bold, purple, green
from .htmlhelp import build_hhx
@@ -483,12 +484,12 @@
self.srcdir, '*.rst', exclude=set(self.config.get('unused_files', ()))):
try:
targetmtime = path.getmtime(path.join(self.outdir,
- filename[:-4] + '.html'))
+ unwebify_filepath(filename)[:-4] + '.html'))
except:
targetmtime = 0
if filename not in self.env.all_files:
yield filename
- elif path.getmtime(path.join(self.srcdir, filename)) > targetmtime:
+ elif path.getmtime(path.join(self.srcdir, unwebify_filepath(filename))) > targetmtime:
yield filename
@@ -513,7 +514,7 @@
ctx = self.globalcontext.copy()
ctx.update(context)
output = self.templates[templatename].render(ctx)
- outfilename = path.join(self.outdir, filename[:-4] + '.html')
+ outfilename = path.join(self.outdir, unwebify_filepath(filename)[:-4] + '.html')
ensuredir(path.dirname(outfilename)) # normally different from self.outdir
try:
with codecs.open(outfilename, 'w', 'utf-8') as fp:
@@ -522,7 +523,7 @@
print >>self.warning_stream, "Error writing file %s: %s" % (outfilename, err)
if self.copysource and context.get('sourcename'):
# copy the source file for the "show source" link
- shutil.copyfile(path.join(self.srcdir, filename),
+ shutil.copyfile(path.join(self.srcdir, unwebify_filepath(filename)),
path.join(self.outdir, context['sourcename']))
def handle_finish(self):
@@ -547,10 +548,10 @@
self.srcdir, '*.rst', exclude=set(self.config.get('unused_files', ()))):
try:
targetmtime = path.getmtime(path.join(self.outdir,
- filename[:-4] + '.fpickle'))
+ unwebify_filepath(filename)[:-4] + '.fpickle'))
except:
targetmtime = 0
- if path.getmtime(path.join(self.srcdir, filename)) > targetmtime:
+ if path.getmtime(path.join(self.srcdir, unwebify_filepath(filename))) > targetmtime:
yield filename
def get_target_uri(self, source_filename):
@@ -577,7 +578,7 @@
self.indexer.feed(filename, category, title, doctree)
def handle_file(self, filename, context, templatename='page'):
- outfilename = path.join(self.outdir, filename[:-4] + '.fpickle')
+ outfilename = path.join(self.outdir, unwebify_filepath(filename)[:-4] + '.fpickle')
ensuredir(path.dirname(outfilename))
context.pop('pathto', None) # can't be pickled
with file(outfilename, 'wb') as fp:
@@ -587,7 +588,7 @@
if context.get('sourcename'):
source_name = path.join(self.outdir, 'sources', context['sourcename'])
ensuredir(path.dirname(source_name))
- shutil.copyfile(path.join(self.srcdir, filename), source_name)
+ shutil.copyfile(path.join(self.srcdir, unwebify_filepath(filename)), source_name)
def handle_finish(self):
# dump the global context
Index: directives.py
===================================================================
--- directives.py (revision 59286)
+++ directives.py (working copy)
@@ -18,7 +18,8 @@
from docutils.parsers.rst import directives, roles
from docutils.parsers.rst.directives import admonitions
from . import addnodes
+from .util import webify_filepath, unwebify_filepath
# ------ index markup --------------------------------------------------------------
@@ -554,7 +555,7 @@
subnode = addnodes.toctree()
includefiles = filter(None, content)
# absolutize filenames
- includefiles = map(lambda x: path.normpath(path.join(dirname, x)), includefiles)
+ includefiles = [webify_filepath(path.normpath(path.join(dirname, x))) for x in includefiles]
subnode['includefiles'] = includefiles
subnode['maxdepth'] = options.get('maxdepth', -1)
return [subnode]
@@ -599,9 +601,9 @@
return [state.document.reporter.warning('File insertion disabled', line=lineno)]
env = state.document.settings.env
fn = arguments[0]
- source_dir = path.dirname(path.abspath(state_machine.input_lines.source(
- lineno - state_machine.input_offset - 1)))
- fn = path.normpath(path.join(source_dir, fn))
+ source_dir = webify_filepath(path.dirname(path.abspath(state_machine.input_lines.source(
+ lineno - state_machine.input_offset - 1))))
+ fn = webify_filepath(path.normpath(path.join(source_dir, fn)))
try:
with open(fn) as f:
Index: environment.py
===================================================================
--- environment.py (revision 59286)
+++ environment.py (working copy)
@@ -28,8 +28,8 @@
from docutils.readers import standalone
from docutils.transforms import Transform
from docutils.transforms.parts import ContentsFilter
from docutils.transforms.universal import FilterMessages

# monkey-patch reST parser to disable alphabetic and roman enumerated lists
from docutils.parsers.rst.states import Body
Body.enum.converters['loweralpha'] = \
@@ -38,7 +38,7 @@
Body.enum.converters['upperroman'] = lambda x: None
from . import addnodes
-from .util import get_matching_files
+from .util import get_matching_files, unwebify_filepath, WEB_SEP
from .refcounting import Refcounts
default_settings = {
@@ -278,11 +278,11 @@
else:
# if the doctree file is not there, rebuild
if not path.isfile(path.join(self.doctreedir,
- filename[:-3] + 'doctree')):
+ unwebify_filepath(filename)[:-3] + 'doctree')):
changed.append(filename)
continue
mtime, md5 = self.all_files[filename]
- newmtime = path.getmtime(path.join(self.srcdir, filename))
+ newmtime = path.getmtime(path.join(self.srcdir, unwebify_filepath(filename)))
if newmtime == mtime:
continue
# check the MD5
@@ -296,7 +296,9 @@
def update(self, config):
"""
(Re-)read all files new or changed since last update.
- Yields a summary and then filenames as it processes them.
+ Yields a summary and then filenames as it processes them.
+ Store all environment filenames webified, i.e. using "/" as the
+ separator in place of os.path.sep.
"""
added, changed, removed = self.get_outdated_files(config)
msg = '%s added, %s changed, %s removed' % (len(added), len(changed),
@@ -329,7 +331,7 @@
self.clear_file(filename)
if src_path is None:
- src_path = path.join(self.srcdir, filename)
+ src_path = path.join(self.srcdir, unwebify_filepath(filename))
self.filename = filename
doctree = publish_doctree(None, src_path, FileInput,
@@ -360,7 +362,7 @@
if save_parsed:
# save the parsed doctree
- doctree_filename = path.join(self.doctreedir, filename[:-3] + 'doctree')
+ doctree_filename = path.join(self.doctreedir, unwebify_filepath(filename)[:-3] + 'doctree')
dirname = path.dirname(doctree_filename)
if not path.isdir(dirname):
os.makedirs(dirname)
@@ -516,7 +518,7 @@
def get_doctree(self, filename):
"""Read the doctree for a file from the pickle and return it."""
- doctree_filename = path.join(self.doctreedir, filename[:-3] + 'doctree')
+ doctree_filename = path.join(self.doctreedir, unwebify_filepath(filename)[:-3] + 'doctree')
with file(doctree_filename, 'rb') as f:
doctree = pickle.load(f)
doctree.reporter = Reporter(filename, 2, 4, stream=self.warning_stream)
@@ -862,6 +864,6 @@
filename. This also resolves the special `index.rst` files. If the file
does not exist the return value will be `None`.
"""
- for rstname in filename + '.rst', filename + path.sep + 'index.rst':
+ for rstname in filename + '.rst', filename + WEB_SEP + 'index.rst':
if rstname in self.all_files:
return rstname
Index: util/__init__.py
===================================================================
--- util/__init__.py (revision 59286)
+++ util/__init__.py (working copy)
@@ -14,18 +14,36 @@
import fnmatch
from os import path
+
+#
+# Define WEB_SEP as a manifest constant, not
+# so much because we expect it to change in
+# the future as to avoid the suspicion that
+# a stray "/" in the code is a hangover from
+# more *nix-oriented origins.
+#
+WEB_SEP = "/"
+
+def webify_filepath(filepath):
+ return filepath.replace(path.sep, WEB_SEP)
+
+
+def unwebify_filepath(webpath):
+ return webpath.replace(WEB_SEP, path.sep)
+
+
def relative_uri(base, to):
"""Return a relative URL from ``base`` to ``to``."""
- b2 = base.split('/')
- t2 = to.split('/')
+ b2 = base.split(WEB_SEP)
+ t2 = to.split(WEB_SEP)
# remove common segments
for x, y in zip(b2, t2):
if x != y:
break
b2.pop(0)
t2.pop(0)
- return '../' * (len(b2)-1) + '/'.join(t2)
+ return ('..' + WEB_SEP) * (len(b2)-1) + WEB_SEP.join(t2)
def ensuredir(path):
@@ -60,12 +78,12 @@
qualified_name = path.join(root[dirlen:], sfile)
if qualified_name in exclude:
continue
- yield qualified_name
+ yield webify_filepath(qualified_name)
def get_category(filename):
"""Get the "category" part of a RST filename."""
- parts = filename.split('/', 1)
+ parts = filename.split(WEB_SEP, 1)
if len(parts) < 2:
return
return parts[0]
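
For what it's worth, here is a small self-contained check of the
reworked relative_uri (the function body is copied from the patch
above; the document names are made up for the example):

    WEB_SEP = "/"

    def relative_uri(base, to):
        """Return a relative URL from ``base`` to ``to`` (both web-separated)."""
        b2 = base.split(WEB_SEP)
        t2 = to.split(WEB_SEP)
        # remove common leading segments
        for x, y in zip(b2, t2):
            if x != y:
                break
            b2.pop(0)
            t2.pop(0)
        return ('..' + WEB_SEP) * (len(b2) - 1) + WEB_SEP.join(t2)

    assert relative_uri("whatsnew/2.6.html", "library/os.html") == "../library/os.html"
    assert relative_uri("library/os.html", "library/sys.html") == "sys.html"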