Am Sonntag, 1. April 2007 11:20 schrieb Abdelrazak Younes:

> So Georg, what's next on this front?

Concerning the input method I don't have an overview. Concerning the LaTeX 
export I did start to develop a patch (attached), but it is completely 
broken currently. I'll continue to work (slowly) on this, but don't expect 
anything before Easter.


Georg
Index: src/LaTeXFeatures.C
===================================================================
--- src/LaTeXFeatures.C	(Revision 17674)
+++ src/LaTeXFeatures.C	(Arbeitskopie)
@@ -60,8 +60,9 @@ LaTeXFeatures::LaTeXFeatures(Buffer cons
 bool LaTeXFeatures::useBabel() const
 {
 	return lyxrc.language_use_babel ||
-		bufferParams().language->lang() != lyxrc.default_language ||
-		this->hasLanguages();
+		(bufferParams().language->lang() != lyxrc.default_language &&
+		 !bufferParams().language->babel().empty()) ||
+		hasLanguages();
 }
 
 
@@ -185,7 +186,8 @@ void LaTeXFeatures::useFloat(string cons
 
 void LaTeXFeatures::useLanguage(Language const * lang)
 {
-	UsedLanguages_.insert(lang);
+	if (!lang->babel().empty())
+		UsedLanguages_.insert(lang);
 }
 
 
@@ -205,26 +207,29 @@ string LaTeXFeatures::getLanguages() con
 {
 	ostringstream languages;
 
-	for (LanguageList::const_iterator cit =
-		    UsedLanguages_.begin();
+	LanguageList::const_iterator const begin = UsedLanguages_.begin();
+	for (LanguageList::const_iterator cit = begin;
 	     cit != UsedLanguages_.end();
-	     ++cit)
-		languages << (*cit)->babel() << ',';
+	     ++cit) {
+		if (cit != begin)
+			languages << ',';
+		languages << (*cit)->babel();
+	}
 	return languages.str();
 }
 
 
 set<string> LaTeXFeatures::getEncodingSet(string const & doc_encoding) const
 {
+	// This only finds encodings of languages supported by babel, but
+	// that does not matter since we don't have a language with an
+	// encoding supported by inputenc but without babel support.
 	set<string> encodings;
 	LanguageList::const_iterator it  = UsedLanguages_.begin();
 	LanguageList::const_iterator end = UsedLanguages_.end();
 	for (; it != end; ++it)
-		// thailatex does not use the inputenc package, but sets up
-		// babel directly for tis620-0 encoding, therefore we must
-		// not add tis620-0 to the encoding set.
 		if ((*it)->encoding()->latexName() != doc_encoding &&
-		    (*it)->encoding()->name() != "tis620-0")
+		    (*it)->encoding()->package() == Encoding::inputenc)
 			encodings.insert((*it)->encoding()->latexName());
 	return encodings;
 }
Index: src/encoding.h
===================================================================
--- src/encoding.h	(Revision 17674)
+++ src/encoding.h	(Arbeitskopie)
@@ -28,11 +28,19 @@ class LaTeXFeatures;
 ///
 class Encoding {
 public:
+	/// Which LaTeX package handles this encoding?
+	enum Package {
+		none,
+		inputenc,
+		CJK
+	};
 	///
 	Encoding() {}
 	///
 	Encoding(std::string const & n, std::string const & l,
-	         std::string const & i);
+	         std::string const & i, bool f, Package p);
+	///
+	void init() const;
 	///
 	std::string const & name() const { return Name_; }
 	///
@@ -51,6 +59,8 @@ public:
 	/// Add the preamble snippet needed for the output of latexChar(c)
 	/// to \p features.
 	void validate(char_type c, LaTeXFeatures & features) const;
+	/// Which LaTeX package handles this encoding?
+	Package package() const { return package_; }
 private:
 	///
 	std::string Name_;
@@ -58,15 +68,27 @@ private:
 	std::string LatexName_;
 	///
 	std::string iconvName_;
+	/// Is this a fixed width encoding?
+	bool fixedwidth_;
 	///
 	typedef std::set<char_type> CharSet;
 	/// Set of UCS4 characters that we can encode (for singlebyte
 	/// encodings only)
-	CharSet encodable_;
+	mutable CharSet encodable_;
 	/// All code points below this are encodable. This helps us to avoid
 	/// lokup of ASCII characters in encodable_ and gives about 1 sec
 	/// speedup on export of the Userguide.
-	char_type start_encodable_;
+	mutable char_type start_encodable_;
+	/// Which LaTeX package handles this encoding?
+	Package package_;
+	/**
+	 * If this is true the stored information about the encoding covers
+	 * all encodable characters. We set this to false initially so that
+	 * we only need to query iconv for the actually used encodings.
+	 * This is needed especially for the multibyte encodings, if we
+	 * complete all encoding info on startup it takes 2-3 minutes.
+	 */
+	mutable bool complete_;
 };
 
 class Encodings {
@@ -94,6 +116,9 @@ public:
 	Encoding const * getFromLyXName(std::string const & name) const;
 	/// Get encoding from LaTeX name \p name
 	Encoding const * getFromLaTeXName(std::string const & name) const;
+	/// Get encoding from iconv name \p name
+	/// This is not unique!
+	Encoding const * getFromIconvName(std::string const & name) const;
 
 	///
 	const_iterator begin() const { return encodinglist.begin(); }
Index: src/paragraph_pimpl.C
===================================================================
--- src/paragraph_pimpl.C	(Revision 17674)
+++ src/paragraph_pimpl.C	(Arbeitskopie)
@@ -817,7 +817,7 @@ void Paragraph::Pimpl::validate(LaTeXFea
 		{
 			features.useLanguage(language);
 			lyxerr[Debug::LATEX] << "Found language "
-					     << language->babel() << endl;
+			                     << language->lang() << endl;
 		}
 	}
 
Index: src/LaTeXFeatures.h
===================================================================
--- src/LaTeXFeatures.h	(Revision 17674)
+++ src/LaTeXFeatures.h	(Arbeitskopie)
@@ -113,7 +113,7 @@ private:
 	static PackagesList packages_;
 	///
 	typedef std::set<Language const *> LanguageList;
-	///
+	/// used languages (only those that are supported by babel)
 	LanguageList UsedLanguages_;
 	///
 	typedef std::set<std::string> UsedFloats;
Index: src/buffer.C
===================================================================
--- src/buffer.C	(Revision 17674)
+++ src/buffer.C	(Arbeitskopie)
@@ -141,7 +141,7 @@ using std::string;
 
 namespace {
 
-int const LYX_FORMAT = 263;
+int const LYX_FORMAT = 264;
 
 } // namespace anon
 
@@ -976,7 +976,8 @@ void Buffer::writeLaTeXSource(odocstream
 	} // output_preamble
 	lyxerr[Debug::INFO] << "preamble finished, now the body." << endl;
 
-	if (!lyxrc.language_auto_begin) {
+	if (!lyxrc.language_auto_begin &&
+	    !params().language->babel().empty()) {
 		// FIXME UNICODE
 		os << from_utf8(subst(lyxrc.language_command_begin,
 		                           "$$lang",
@@ -985,6 +986,16 @@ void Buffer::writeLaTeXSource(odocstream
 		texrow().newline();
 	}
 
+	Encoding const & encoding = params().encoding();
+	if (encoding.package() == Encoding::CJK) {
+		// Open a CJK environment, since in contrast to the encodings
+		// handled by inputenc the document encoding is not set in
+		// the preamble if it is handled by CJK.sty.
+		os << "\\begin{CJK}{" << from_ascii(encoding.latexName())
+		   << "}{}\n";
+		texrow().newline();
+	}
+
 	// if we are doing a real file with body, even if this is the
 	// child of some other buffer, let's cut the link here.
 	// This happens for example if only a child document is printed.
@@ -1005,7 +1016,16 @@ void Buffer::writeLaTeXSource(odocstream
 	os << endl;
 	texrow().newline();
 
-	if (!lyxrc.language_auto_end) {
+	if (encoding.package() == Encoding::CJK) {
+		// Close the open CJK environment.
+		// latexParagraphs will have opened one even if the last text
+		// was not CJK.
+		os << "\\end{CJK}\n";
+		texrow().newline();
+	}
+
+	if (!lyxrc.language_auto_end &&
+	    !params().language->babel().empty()) {
 		os << from_utf8(subst(lyxrc.language_command_end,
 		                           "$$lang",
 		                           params().language->babel()))
Index: src/bufferparams.C
===================================================================
--- src/bufferparams.C	(Revision 17674)
+++ src/bufferparams.C	(Arbeitskopie)
@@ -797,16 +797,15 @@ bool BufferParams::writeLaTeX(odocstream
 		clsoptions << "landscape,";
 
 	// language should be a parameter to \documentclass
-	if (language->babel() == "hebrew"
-	    && default_language->babel() != "hebrew")
-		// This seems necessary
-		features.useLanguage(default_language);
-
 	ostringstream language_options;
 	bool const use_babel = features.useBabel();
 	if (use_babel) {
 		language_options << features.getLanguages();
-		language_options << language->babel();
+		if (!language->babel().empty()) {
+			if (!language_options.str().empty())
+				language_options << ',';
+			language_options << language->babel();
+		}
 		if (lyxrc.language_global_options)
 			clsoptions << language_options.str() << ',';
 	}
@@ -849,16 +848,15 @@ bool BufferParams::writeLaTeX(odocstream
 	if (inputenc == "auto") {
 		string const doc_encoding =
 			language->encoding()->latexName();
+		Encoding::Package const package =
+			language->encoding()->package();
 
 		// Create a list with all the input encodings used
 		// in the document
 		std::set<string> encodings =
 			features.getEncodingSet(doc_encoding);
 
-		// thailatex does not use the inputenc package, but sets up
-		// babel directly for tis620-0 encoding, therefore we must
-		// not request inputenc for tis620-0 encoding
-		if (!encodings.empty() || doc_encoding != "tis620-0") {
+		if (!encodings.empty() || package == Encoding::inputenc) {
 			os << "\\usepackage[";
 			std::set<string>::const_iterator it = encodings.begin();
 			std::set<string>::const_iterator const end = encodings.end();
@@ -868,7 +866,7 @@ bool BufferParams::writeLaTeX(odocstream
 			}
 			for (; it != end; ++it)
 				os << ',' << from_ascii(*it);
-			if (doc_encoding != "tis620-0") {
+			if (package == Encoding::inputenc) {
 				if (!encodings.empty())
 					os << ',';
 				os << from_ascii(doc_encoding);
@@ -876,11 +874,24 @@ bool BufferParams::writeLaTeX(odocstream
 			os << "]{inputenc}\n";
 			texrow.newline();
 		}
-	} else if (inputenc != "default" && inputenc != "tis620-0" &&
-	           inputenc != "ascii") {
-		os << "\\usepackage[" << from_ascii(inputenc)
-		   << "]{inputenc}\n";
-		texrow.newline();
+		if (package == Encoding::CJK) {
+			os << "\\usepackage{CJK}\n";
+			texrow.newline();
+		}
+	} else if (inputenc != "default") {
+		switch (language->encoding()->package()) {
+		case Encoding::none:
+			break;
+		case Encoding::inputenc:
+			os << "\\usepackage[" << from_ascii(inputenc)
+			   << "]{inputenc}\n";
+			texrow.newline();
+			break;
+		case Encoding::CJK:
+			os << "\\usepackage{CJK}\n";
+			texrow.newline();
+			break;
+		}
 	}
 
 	if (use_geometry || nonstandard_papersize) {
Index: src/lyxfont.C
===================================================================
--- src/lyxfont.C	(Revision 17674)
+++ src/lyxfont.C	(Arbeitskopie)
@@ -752,12 +752,15 @@ int LyXFont::latexWriteStartChanges(odoc
 				os << "\\L{";
 				count += 3;
 			}
-		} else {
+		} else if (!language()->babel().empty()) {
 			string const tmp =
 				subst(lyxrc.language_command_local,
 				      "$$lang", language()->babel());
 			os << from_ascii(tmp);
 			count += tmp.length();
+		} else {
+			os << '{';
+			count += 1;
 		}
 	}
 
Index: src/paragraph.C
===================================================================
--- src/paragraph.C	(Revision 17674)
+++ src/paragraph.C	(Arbeitskopie)
@@ -1075,12 +1075,15 @@ bool Paragraph::simpleTeXOnePar(Buffer c
 		}
 
 		// Switch file encoding if necessary
-		int const count = switchEncoding(os, bparams,
-				*(runparams.encoding),
-				*(font.language()->encoding()));
-		if (count > 0) {
-			column += count;
-			runparams.encoding = font.language()->encoding();
+		if (runparams.encoding->package() == Encoding::inputenc &&
+		    font.language()->encoding()->package() == Encoding::inputenc) {
+			int const count = switchEncoding(os, bparams,
+					*(runparams.encoding),
+					*(font.language()->encoding()));
+			if (count > 0) {
+				column += count;
+				runparams.encoding = font.language()->encoding();
+			}
 		}
 
 		// Do we need to change font?
Index: src/encoding.C
===================================================================
--- src/encoding.C	(Revision 17674)
+++ src/encoding.C	(Arbeitskopie)
@@ -123,25 +123,42 @@ struct CharInfo {
 typedef std::map<char_type, CharInfo> CharInfoMap;
 CharInfoMap unicodesymbols;
 
+
+/// The highest code point in UCS4 encoding (1<<20 + 1<<16)
+char_type const max_ucs4 = 0x110000;
+
 } // namespace anon
 
 
-Encoding::Encoding(string const & n, string const & l, string const & i)
-	: Name_(n), LatexName_(l), iconvName_(i)
+Encoding::Encoding(string const & n, string const & l, string const & i,
+                   bool f, Encoding::Package p)
+	: Name_(n), LatexName_(l), iconvName_(i), fixedwidth_(f), package_(p)
 {
-	if (n == "ascii")
+	if (n == "ascii") {
 		// ASCII can encode 128 code points and nothing else
 		start_encodable_ = 128;
-	else if (i == "UTF-8")
-		// UTF8 can encode all 1<<20 + 1<<16 UCS4 code points
-		start_encodable_ = 0x110000;
-	else {
-		start_encodable_ = 0;
-		// temporarily switch off lyxerr, since we will generate iconv errors
-		lyxerr.disable();
+		complete_ = true;
+	} else if (i == "UTF-8") {
+		// UTF8 can encode all UCS4 code points
+		start_encodable_ = max_ucs4;
+		complete_ = true;
+	} else {
+		complete_ = false;
+	}
+}
+
+
+void Encoding::init() const
+{
+	start_encodable_ = 0;
+	// temporarily switch off lyxerr, since we will generate iconv errors
+	lyxerr.disable();
+	if (fixedwidth_) {
+		// We do not need to check all UCS4 code points, it is enough
+		// if we check all 256 code points of this encoding.
 		for (unsigned short j = 0; j < 256; ++j) {
 			char const c = j;
-			std::vector<char_type> const ucs4 = eightbit_to_ucs4(&c, 1, i);
+			std::vector<char_type> const ucs4 = eightbit_to_ucs4(&c, 1, iconvName_);
 			if (ucs4.size() == 1) {
 				char_type const c = ucs4[0];
 				CharInfoMap::const_iterator const it = unicodesymbols.find(c);
@@ -149,19 +166,36 @@ Encoding::Encoding(string const & n, str
 					encodable_.insert(c);
 			}
 		}
-		lyxerr.enable();
-		CharSet::iterator it = encodable_.find(start_encodable_);
-		while (it != encodable_.end()) {
-			encodable_.erase(it);
-			++start_encodable_;
-			it = encodable_.find(start_encodable_);
+	} else {
+		// We do not know how many code points this encoding has, and
+		// they do not have a direct representation as a single byte,
+		// therefore we need to check all UCS4 code points.
+		// This is expensive!
+		for (char_type c = 0; c < max_ucs4; ++c) {
+			std::vector<char> const eightbit = ucs4_to_eightbit(&c, 1, iconvName_);
+			if (!eightbit.empty()) {
+				CharInfoMap::const_iterator const it = unicodesymbols.find(c);
+				if (it == unicodesymbols.end() || !it->second.force)
+					encodable_.insert(c);
+			}
 		}
 	}
+	lyxerr.enable();
+	CharSet::iterator it = encodable_.find(start_encodable_);
+	while (it != encodable_.end()) {
+		encodable_.erase(it);
+		++start_encodable_;
+		it = encodable_.find(start_encodable_);
+	}
+	complete_ = true;
 }
 
 
 docstring const Encoding::latexChar(char_type c) const
 {
+	// validate() should have been called before
+	BOOST_ASSERT(complete_);
+
 	if (c < start_encodable_)
 		return docstring(1, c);
 	if (encodable_.find(c) == encodable_.end()) {
@@ -181,6 +215,8 @@ docstring const Encoding::latexChar(char
 
 void Encoding::validate(char_type c, LaTeXFeatures & features) const
 {
+	if (!complete_)
+		init();
 	// Add the preamble stuff even if c can be encoded in this encoding,
 	// since the inputenc package only maps the code point c to a command,
 	// it does not make this command available.
@@ -272,6 +308,21 @@ Encoding const * Encodings::getFromLaTeX
 }
 
 
+Encoding const * Encodings::getFromIconvName(string const & name) const
+{
+	// We don't use std::find_if because it makes copies of the pairs in
+	// the map.
+	// This linear search is OK since we don't have many encodings.
+	// Users could even optimize it by putting the encodings they use
+	// most at the top of lib/encodings.
+	EncodingList::const_iterator const end = encodinglist.end();
+	for (EncodingList::const_iterator it = encodinglist.begin(); it != end; ++it)
+		if (it->second.iconvName() == name)
+			return &it->second;
+	return 0;
+}
+
+
 Encodings::Encodings()
 {
 }
@@ -363,8 +414,32 @@ void Encodings::read(FileName const & en
 			string const latexname = lex.getString();
 			lex.next();
 			string const iconvname = lex.getString();
+			lex.next();
+			string const width = lex.getString();
+			bool fixedwidth;
+			if (width == "fixed")
+				fixedwidth = true;
+			else if (width == "variable")
+				fixedwidth = false;
+			else
+				lex.printError("Encodings::read: "
+				               "Unknown width: `$$Token'");
+			lex.next();
+			string const p = lex.getString();
+			Encoding::Package package;
+			if (p == "none")
+				package = Encoding::none;
+			else if (p == "inputenc")
+				package = Encoding::inputenc;
+			else if (p == "CJK")
+				package = Encoding::CJK;
+			else
+				lex.printError("Encodings::read: "
+				               "Unknown package: `$$Token'");
 			lyxerr[Debug::INFO] << "Reading encoding " << name << endl;
-			encodinglist[name] = Encoding(name, latexname, iconvname);
+			encodinglist[name] = Encoding(name, latexname,
+			                              iconvname, fixedwidth,
+			                              package);
 			if (lex.lex() != et_end)
 				lex.printError("Encodings::read: "
 					       "missing end");
Index: src/output_latex.C
===================================================================
--- src/output_latex.C	(Revision 17674)
+++ src/output_latex.C	(Arbeitskopie)
@@ -105,7 +105,8 @@ TeXEnvironment(Buffer const & buf,
 	if (par_language->babel() != prev_par_language->babel()) {
 
 		if (!lyxrc.language_command_end.empty() &&
-		    prev_par_language->babel() != doc_language->babel()) {
+		    prev_par_language->babel() != doc_language->babel() &&
+		    !prev_par_language->babel().empty()) {
 			os << from_ascii(subst(
 				lyxrc.language_command_end,
 				"$$lang",
@@ -115,7 +116,8 @@ TeXEnvironment(Buffer const & buf,
 		}
 
 		if (lyxrc.language_command_end.empty() ||
-		    par_language->babel() != doc_language->babel()) {
+		    par_language->babel() != doc_language->babel() &&
+		    !par_language->babel().empty()) {
 			os << from_ascii(subst(
 				lyxrc.language_command_begin,
 				"$$lang",
@@ -271,7 +273,8 @@ TeXOnePar(Buffer const & buf,
 		     || boost::prior(pit)->getDepth() < pit->getDepth())))
 	{
 		if (!lyxrc.language_command_end.empty() &&
-		    prev_par_language->babel() != doc_language->babel())
+		    prev_par_language->babel() != doc_language->babel() &&
+		    !prev_par_language->babel().empty())
 		{
 			os << from_ascii(subst(lyxrc.language_command_end,
 				"$$lang",
@@ -281,7 +284,8 @@ TeXOnePar(Buffer const & buf,
 		}
 
 		if (lyxrc.language_command_end.empty() ||
-		    par_language->babel() != doc_language->babel())
+		    par_language->babel() != doc_language->babel() &&
+		    !par_language->babel().empty())
 		{
 			os << from_ascii(subst(
 				lyxrc.language_command_begin,
@@ -430,17 +434,21 @@ TeXOnePar(Buffer const & buf,
 			os << '\n';
 			texrow.newline();
 		}
-		if (lyxrc.language_command_end.empty())
-			os << from_ascii(subst(
-				lyxrc.language_command_begin,
-				"$$lang",
-				doc_language->babel()));
-		else
+		if (lyxrc.language_command_end.empty()) {
+			if (!doc_language->babel().empty()) {
+				os << from_ascii(subst(
+					lyxrc.language_command_begin,
+					"$$lang",
+					doc_language->babel()));
+				pending_newline = true;
+			}
+		} else if (!par_language->babel().empty()) {
 			os << from_ascii(subst(
 				lyxrc.language_command_end,
 				"$$lang",
 				par_language->babel()));
-		pending_newline = true;
+			pending_newline = true;
+		}
 	}
 
 	if (pending_newline) {
@@ -567,22 +575,43 @@ void latexParagraphs(Buffer const & buf,
 int switchEncoding(odocstream & os, BufferParams const & bparams,
                    Encoding const & oldEnc, Encoding const & newEnc)
 {
-	// FIXME thailatex does not support the inputenc package, so we
-	// ignore switches from/to tis620-0 encoding here. This does of
-	// course only work as long as the non-thai text contains ASCII
-	// only, but it is the best we can do.
+	// FIXME We ignore encoding switches from/to encodings that
+	// support neither the inputenc package nor the CJK package here.
+	// This does of course only work in special cases (e.g. a switch
+	// from tis620-0 to latin1 where the latin1 text contains only
+	// ASCII), but it is the best we can do.
 	if ((bparams.inputenc == "auto" || bparams.inputenc == "default") &&
 	    oldEnc.name() != newEnc.name() &&
-	    oldEnc.name() != "ascii" && newEnc.name() != "ascii" &&
-	    oldEnc.name() != "tis620-0" && newEnc.name() != "tis620-0") {
+	    oldEnc.package() != Encoding::none &&
+	    newEnc.package() != Encoding::none) {
 		lyxerr[Debug::LATEX] << "Changing LaTeX encoding from "
 		                     << oldEnc.name() << " to "
 		                     << newEnc.name() << endl;
 		os << setEncoding(newEnc.iconvName());
 		if (bparams.inputenc != "default") {
 			docstring const inputenc(from_ascii(newEnc.latexName()));
-			os << "\\inputencoding{" << inputenc << '}';
-			return 16 + inputenc.length();
+			switch (newEnc.package()) {
+			case Encoding::none:
+				break;
+			case Encoding::inputenc: {
+				int count = inputenc.length();
+				if (oldEnc.package() == Encoding::CJK) {
+					os << "\\end{CJK}";
+					count += 9;
+				}
+				os << "\\inputencoding{" << inputenc << '}';
+				return count + 16;
+			}
+			case Encoding::CJK: {
+				int count = inputenc.length();
+				if (oldEnc.package() == Encoding::CJK) {
+					os << "\\end{CJK}";
+					count += 9;
+				}
+				os << "\\begin{CJK}{" << inputenc << "}{}";
+				return count + 15;
+			}
+			}
 		}
 	}
 	return 0;
Index: lib/lyx2lyx/LyX.py
===================================================================
--- lib/lyx2lyx/LyX.py	(Revision 17674)
+++ lib/lyx2lyx/LyX.py	(Arbeitskopie)
@@ -74,7 +74,7 @@ format_relation = [("0_06",    [200], ge
                    ("1_2",     [220], generate_minor_versions("1.2" , 4)),
                    ("1_3",     [221], generate_minor_versions("1.3" , 7)),
                    ("1_4", range(222,246), generate_minor_versions("1.4" , 3)),
-                   ("1_5", range(246,264), generate_minor_versions("1.5" , 0))]
+                   ("1_5", range(246,265), generate_minor_versions("1.5" , 0))]
 
 
 def formats_list():
Index: lib/lyx2lyx/lyx_1_5.py
===================================================================
--- lib/lyx2lyx/lyx_1_5.py	(Revision 17674)
+++ lib/lyx2lyx/lyx_1_5.py	(Arbeitskopie)
@@ -1246,6 +1246,28 @@ def revert_language_name(document):
         document.header[i] = "\\language %s" % document.language
 
 
+def revert_CJK(document):
+    " Set CJK encodings to default and languages chinese, japanese and korean to english. "
+    encodings = ["Bg5", "Bg5+", "GB", "GBt", "GBK", "JIS",
+                 "KS", "SJIS", "UTF8", "EUC-TW", "EUC-JP"]
+    i = find_token(document.header, "\\inputencoding", 0)
+    if i == -1:
+        document.header.append("\\inputencoding auto")
+    else:
+        inputenc = get_value(document.header, "\\inputencoding", i)
+        if inputenc in encodings:
+            document.header[i] = "\\inputencoding default"
+    document.inputencoding = get_value(document.header, "\\inputencoding", 0)
+
+    if document.language == "chinese-simplified" or \
+       document.language == "chinese-traditional" or \
+       document.language == "japanese" or document.language == "korean":
+        document.language = "english"
+        i = find_token(document.header, "\\language", 0)
+        if i != -1:
+            document.header[i] = "\\language english"
+
+
 ##
 # Conversion hub
 #
@@ -1268,9 +1290,11 @@ convert = [[246, []],
            [260, []],
            [261, [convert_changes]],
            [262, []],
-           [263, [normalize_language_name]]]
+           [263, [normalize_language_name]],
+           [264, []]]
 
-revert =  [[262, [revert_language_name]],
+revert =  [[263, [revert_CJK]],
+           [262, [revert_language_name]],
            [261, [revert_ascii]],
            [260, []],
            [259, [revert_utf8x]],
Index: lib/languages
===================================================================
--- lib/languages	(Revision 17674)
+++ lib/languages	(Arbeitskopie)
@@ -14,6 +14,8 @@ bulgarian   bulgarian   "Bulgarian"     
 canadian    canadian	"Canadian"	false  iso8859-15 en_CA	 ""
 canadien    canadien	"French Canadian"	false  iso8859-15  fr_CA	 ""
 catalan     catalan 	"Catalan"	false  iso8859-15 ca_ES	 ""
+chinese-simplified ""	"Chinese (simplified)"	false euc-cn zh	 ""
+chinese-traditional ""	"Chinese (traditional)"	false utf8-cjk zh	 ""
 croatian    croatian	"Croatian"	false  iso8859-2  hr_HR	 ""
 czech       czech	"Czech"		false  iso8859-2  cs_CZ	 ""
 danish      danish	"Danish"	false  iso8859-15 da_DK	 ""
@@ -35,7 +37,9 @@ hebrew      hebrew	"Hebrew"	true   cp125
 #hungarian   hungarian	"Hungarian"	false  iso8859-2  ""	 ""
 irish       irish	"Irish"		false  iso8859-15 ga_IE	 ""
 italian     italian	"Italian"	false  iso8859-15 it_IT	 ""
+japanese    ""	 	"Japanese"	false  euc-jp     ja	 ""
 kazakh	    kazakh	"Kazakh"	false  pt154      kk_KZ	 ""
+korean      "" 		"Korean"	false  euc-kr     ko	 ""
 #lsorbian    lsorbian	"Lsorbian"	false  iso8859-2  ""	 ""
 # no ISO listing for lsorbian (Garst)
 lithuanian  lithuanian	"Lithuanian"	false  iso8859-13 lt_LT	 ""
Index: lib/encodings
===================================================================
--- lib/encodings	(Revision 17674)
+++ lib/encodings	(Arbeitskopie)
@@ -1,125 +1,181 @@
 # FIXME: Have a look at the encodings known by the inputenc package and add
 # missing ones. Caution: File format change!
 
-# Note that you can only add single byte encodings to this file without
-# changing some C++ code.
-# The only multibyte encoding that is currently supported is utf8, and this
-# support is hardcoded to the iconv name "UTF-8".
+# Note that you can only add singlebyte encodings to this file.
+# LyX does not support the output of multibyte encodings (e.g. utf16).
+# It does support singlebyte encodings with variable width (e.g. utf8).
+# These are marked with the "variable" keyword.
+# Fixed width encodings are marked with the "fixed" keyword.
 
-# Order of names: LyX name LaTeX name iconv name
+# Syntax: Encoding <LyX name> <LaTeX name> <iconv name> fixed|variable <package> End
 
-Encoding utf8 utf8 UTF-8
+# encodings used by inputenc.sty
+
+Encoding utf8 utf8 UTF-8 variable inputenc
 End
 
 # This one is used by many CJK packages. utf8 is supposed to be the successor,
 # but does not have all features of utf8x yet.
-Encoding utf8x utf8x UTF-8
+Encoding utf8x utf8x UTF-8 variable inputenc
 End
 
-Encoding iso8859-1 latin1 ISO-8859-1
+Encoding iso8859-1 latin1 ISO-8859-1 fixed inputenc
 End
 
-Encoding iso8859-2 latin2 ISO-8859-2
+Encoding iso8859-2 latin2 ISO-8859-2 fixed inputenc
 End
 
-Encoding iso8859-3 latin3 ISO-8859-3
+Encoding iso8859-3 latin3 ISO-8859-3 fixed inputenc
 End
 
-Encoding iso8859-4 latin4 ISO-8859-4
+Encoding iso8859-4 latin4 ISO-8859-4 fixed inputenc
 End
 
-Encoding iso8859-5 iso88595 ISO-8859-5
+Encoding iso8859-5 iso88595 ISO-8859-5 fixed inputenc
 End
 
 # Not standard, see http://tug.ctan.org/tex-archive/language/arabic/arabi/arabi/texmf/latex/arabi/
-Encoding iso8859-6 8859-6 ISO-8859-6
+Encoding iso8859-6 8859-6 ISO-8859-6 fixed inputenc
 End
 
-Encoding iso8859-7 iso-8859-7 ISO-8859-7
+Encoding iso8859-7 iso-8859-7 ISO-8859-7 fixed inputenc
 End
 
-Encoding iso8859-8 8859-8 ISO-8859-8
+Encoding iso8859-8 8859-8 ISO-8859-8 fixed inputenc
 End
 
-Encoding iso8859-9 latin5 ISO-8859-9
+Encoding iso8859-9 latin5 ISO-8859-9 fixed inputenc
 End
 
 # Not standard, see http://www.vtex.lt/tex/littex/index.html
-Encoding iso8859-13 l7xenc ISO-8859-13
+Encoding iso8859-13 l7xenc ISO-8859-13 fixed inputenc
 End
 
-Encoding iso8859-15 latin9 ISO-8859-15
+Encoding iso8859-15 latin9 ISO-8859-15 fixed inputenc
 End
 
-Encoding iso8859-16 latin10 ISO-8859-16
+Encoding iso8859-16 latin10 ISO-8859-16 fixed inputenc
 End
 
-Encoding cp437 cp437 CP437
+Encoding cp437 cp437 CP437 fixed inputenc
 End
 
 # cp437, but on position 225 is sz instead of beta
-Encoding cp437de cp437de CP437
+Encoding cp437de cp437de CP437 fixed inputenc
 End
 
-Encoding cp850 cp850 CP850
+Encoding cp850 cp850 CP850 fixed inputenc
 End
 
-Encoding cp852 cp852 CP852
+Encoding cp852 cp852 CP852 fixed inputenc
 End
 
-Encoding cp855 cp855 CP855
+Encoding cp855 cp855 CP855 fixed inputenc
 End
 
-Encoding cp858 cp858 CP858
+Encoding cp858 cp858 CP858 fixed inputenc
 End
 
-Encoding cp862 cp862 CP862
+Encoding cp862 cp862 CP862 fixed inputenc
 End
 
-Encoding cp865 cp865 CP865
+Encoding cp865 cp865 CP865 fixed inputenc
 End
 
-Encoding cp866 cp866 CP866
+Encoding cp866 cp866 CP866 fixed inputenc
 End
 
-Encoding cp1250 cp1250 CP1250
+Encoding cp1250 cp1250 CP1250 fixed inputenc
 End
 
-Encoding cp1251 cp1251 CP1251
+Encoding cp1251 cp1251 CP1251 fixed inputenc
 End
 
-Encoding cp1252 cp1252 CP1252
+Encoding cp1252 cp1252 CP1252 fixed inputenc
 End
 
-Encoding cp1255 cp1255 CP1255
+Encoding cp1255 cp1255 CP1255 fixed inputenc
 End
 
 # Not standard, see http://tug.ctan.org/tex-archive/language/arabic/arabi/arabi/texmf/latex/arabi/
-Encoding cp1256 cp1256 CP1256
+Encoding cp1256 cp1256 CP1256 fixed inputenc
 End
 
-Encoding cp1257 cp1257 CP1257
+Encoding cp1257 cp1257 CP1257 fixed inputenc
 End
 
-Encoding koi8 koi8-r KOI8-R
+Encoding koi8 koi8-r KOI8-R fixed inputenc
 End
 
-Encoding koi8-u koi8-u KOI8-U
+Encoding koi8-u koi8-u KOI8-U fixed inputenc
 End
 
-# This one needs hardcoded support, since the inputenc package does not know
-# tis620-0, and thailatex sets up babel directly to use tis620-0, so the value
-# for inputenc is never output to .tex files (but needed for the hardcoded
-# tis620-0 support).
-Encoding tis620-0 tis620-0 TIS620-0
+Encoding pt154 pt154 PT154 fixed inputenc
 End
 
-Encoding pt154 pt154 PT154
+Encoding pt254 pt254 PT254 fixed inputenc
 End
 
-Encoding pt254 pt254 PT254
+
+# encodings used by CJK.sty
+
+# The following encodings that are supported by the CJK package are not
+# included here, because they are not widely used and lack proper iconv support:
+# Bg5+, GBt
+# See the NOTES file of libiconv for details.
+
+# The following encodings can't be handled directly, because the code points
+# of TeX control characters like {, } and \ can occur in the second byte:
+
+# For traditional chinese
+#Encoding big5 Bg5 BIG5 variable CJK
+#End
+
+# For japanese
+#Encoding shift-jis SJIS SJIS variable CJK
+#End
+
+# The following encodings need hardcoded support of the encodable unicode
+# range, but are known by iconv:
+
+# For simplified chinese
+Encoding euc-cn GB EUC-CN variable CJK
+End
+
+# For simplified chinese
+Encoding gbk GBK GBK variable CJK
 End
 
+# For japanese
+Encoding jis JIS JIS variable CJK
+End
+
+# For korean
+Encoding euc-kr KS EUC-KR variable CJK
+End
+
+# The CJK package has yet another name for utf8...
+Encoding utf8-cjk UTF8 UTF-8 variable CJK
+End
+
+# For traditional chinese
+Encoding euc-tw EUC-TW EUC-TW variable CJK
+End
+
+# For japanese
+Encoding euc-jp EUC-JP EUC-JP variable CJK
+End
+
+
+# This one needs hardcoded support, since the inputenc package does not know
+# tis620-0, and thailatex sets up babel directly to use tis620-0, so the
+# LaTeX name is never output to .tex files (but needed for the hardcoded
+# tis620-0 support).
+Encoding tis620-0 tis620-0 TIS620-0 fixed none
+End
+
+
 # Pure 7bit ASCII encoding (partially hardcoded in LyX)
-Encoding ascii ascii ascii
+Encoding ascii ascii ascii fixed none
 End
+

Reply via email to