diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 24c0fd4..839ff42 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -520,6 +520,7 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
 				_printTocEntry(AH, te, ropt, true, false);
 
 				if (strcmp(te->desc, "BLOBS") == 0 ||
+					strcmp(te->desc, "BLOB DATA") == 0 ||
 					strcmp(te->desc, "BLOB COMMENTS") == 0)
 				{
 					ahlog(AH, 1, "restoring %s\n", te->desc);
@@ -903,7 +904,7 @@ EndRestoreBlobs(ArchiveHandle *AH)
  * Called by a format handler to initiate restoration of a blob
  */
 void
-StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
+StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool cleanup, bool compat)
 {
 	Oid			loOid;
 
@@ -914,24 +915,29 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
 
 	ahlog(AH, 2, "restoring large object with OID %u\n", oid);
 
-	if (drop)
-		DropBlobIfExists(AH, oid);
+	if (cleanup)
+		CleanupBlobIfExists(AH, oid, compat);
 
 	if (AH->connection)
 	{
-		loOid = lo_create(AH->connection, oid);
-		if (loOid == 0 || loOid != oid)
-			die_horribly(AH, modulename, "could not create large object %u\n",
-						 oid);
-
+		if (compat)
+		{
+			loOid = lo_create(AH->connection, oid);
+			if (loOid == 0 || loOid != oid)
+				die_horribly(AH, modulename, "could not create large object %u\n",
+							 oid);
+		}
 		AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
 		if (AH->loFd == -1)
 			die_horribly(AH, modulename, "could not open large object\n");
 	}
 	else
 	{
-		ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
-				 oid, INV_WRITE);
+		if (compat)
+			ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n", oid, INV_WRITE);
+		else
+			ahprintf(AH, "SELECT pg_catalog.lo_open(%u, %d);\n",
+					 oid, INV_WRITE);
 	}
 
 	AH->writingBlob = 1;
@@ -1940,7 +1946,8 @@ WriteDataChunks(ArchiveHandle *AH)
 			AH->currToc = te;
 			/* printf("Writing data for %d (%x)\n", te->id, te); */
 
-			if (strcmp(te->desc, "BLOBS") == 0)
+			if (strcmp(te->desc, "BLOBS") == 0 ||
+				strcmp(te->desc, "BLOB DATA") == 0)
 			{
 				startPtr = AH->StartBlobsPtr;
 				endPtr = AH->EndBlobsPtr;
@@ -2077,6 +2084,7 @@ ReadToc(ArchiveHandle *AH)
 				te->section = SECTION_NONE;
 			else if (strcmp(te->desc, "TABLE DATA") == 0 ||
 					 strcmp(te->desc, "BLOBS") == 0 ||
+					 strcmp(te->desc, "BLOB DATA") == 0 ||
 					 strcmp(te->desc, "BLOB COMMENTS") == 0)
 				te->section = SECTION_DATA;
 			else if (strcmp(te->desc, "CONSTRAINT") == 0 ||
@@ -2235,6 +2243,10 @@ _tocEntryRequired(TocEntry *te, RestoreOptions *ropt, bool include_acls)
 	if (!ropt->create && strcmp(te->desc, "DATABASE") == 0)
 		return 0;
 
+	/* Skip "BLOB ITEM" entries when --schema-only was requested */
+	if (strcmp(te->desc, "BLOB ITEM") == 0 && ropt->schemaOnly)
+		return 0;
+
 	/* Check options for selective dump/restore */
 	if (ropt->schemaNames)
 	{
@@ -2713,6 +2725,13 @@ _getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH)
 		return;
 	}
 
+	/* Use ALTER LARGE OBJECT for BLOB ITEM */
+	if (strcmp(type, "BLOB ITEM") == 0)
+	{
+		appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
+		return;
+	}
+
 	write_msg(modulename, "WARNING: don't know how to set owner for object type %s\n",
 			  type);
 }
@@ -2824,6 +2843,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isDat
 		strlen(te->owner) > 0 && strlen(te->dropStmt) > 0)
 	{
 		if (strcmp(te->desc, "AGGREGATE") == 0 ||
+			strcmp(te->desc, "BLOB ITEM") == 0 ||
 			strcmp(te->desc, "CONVERSION") == 0 ||
 			strcmp(te->desc, "DATABASE") == 0 ||
 			strcmp(te->desc, "DOMAIN") == 0 ||
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index c09cec5..acb1986 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -359,7 +359,7 @@ int			ReadOffset(ArchiveHandle *, pgoff_t *);
 size_t		WriteOffset(ArchiveHandle *, pgoff_t, int);
 
 extern void StartRestoreBlobs(ArchiveHandle *AH);
-extern void StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop);
+extern void StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool cleanup, bool compat);
 extern void EndRestoreBlob(ArchiveHandle *AH, Oid oid);
 extern void EndRestoreBlobs(ArchiveHandle *AH);
 
@@ -371,7 +371,7 @@ extern void InitArchiveFmt_Tar(ArchiveHandle *AH);
 extern bool isValidTarHeader(char *header);
 
 extern int	ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *newUser);
-extern void	DropBlobIfExists(ArchiveHandle *AH, Oid oid);
+extern void	CleanupBlobIfExists(ArchiveHandle *AH, Oid oid, bool compat);
 
 int			ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH);
 int			ahprintf(ArchiveHandle *AH, const char *fmt,...) __attribute__((format(printf, 2, 3)));
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index ea16c0b..c001cde 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -54,7 +54,7 @@ static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
 static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
 static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
 static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
-static void _LoadBlobs(ArchiveHandle *AH, bool drop);
+static void _LoadBlobs(ArchiveHandle *AH, bool cleanup, bool compat);
 static void _Clone(ArchiveHandle *AH);
 static void _DeClone(ArchiveHandle *AH);
 
@@ -498,7 +498,10 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
 			break;
 
 		case BLK_BLOBS:
-			_LoadBlobs(AH, ropt->dropSchema);
+			if (strcmp(te->desc, "BLOBS") == 0 || ropt->dataOnly)
+				_LoadBlobs(AH, ropt->dropSchema, true);
+			else
+				_LoadBlobs(AH, ropt->dropSchema, false);
 			break;
 
 		default:				/* Always have a default */
@@ -619,7 +622,7 @@ _PrintData(ArchiveHandle *AH)
 }
 
 static void
-_LoadBlobs(ArchiveHandle *AH, bool drop)
+_LoadBlobs(ArchiveHandle *AH, bool cleanup, bool compat)
 {
 	Oid			oid;
 
@@ -628,7 +631,7 @@ _LoadBlobs(ArchiveHandle *AH, bool drop)
 	oid = ReadInt(AH);
 	while (oid != 0)
 	{
-		StartRestoreBlob(AH, oid, drop);
+		StartRestoreBlob(AH, oid, cleanup, compat);
 		_PrintData(AH);
 		EndRestoreBlob(AH, oid);
 		oid = ReadInt(AH);
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 6a195a9..b1c7e8d 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -12,6 +12,7 @@
 
 #include "pg_backup_db.h"
 #include "dumputils.h"
+#include "libpq/libpq-fs.h"
 
 #include <unistd.h>
 
@@ -653,20 +654,21 @@ CommitTransaction(ArchiveHandle *AH)
 }
 
 void
-DropBlobIfExists(ArchiveHandle *AH, Oid oid)
+CleanupBlobIfExists(ArchiveHandle *AH, Oid oid, bool compat)
 {
-	/* Call lo_unlink only if exists to avoid not-found error. */
+	/* Remove or truncate the blob only if it exists, to avoid errors. */
-	if (PQserverVersion(AH->connection) >= 80500)
-	{
+	if (PQserverVersion(AH->connection) < 90000)
+		die_horribly(AH, NULL,
+					 "could not restore large object into older server\n");
+
+	if (compat)
 		ahprintf(AH, "SELECT pg_catalog.lo_unlink(oid) "
-					 "FROM pg_catalog.pg_largeobject_metadata "
-					 "WHERE oid = %u;\n", oid);
-	}
+				 "FROM pg_catalog.pg_largeobject_metadata "
+				 "WHERE oid = %u;\n", oid);
 	else
-	{
-		ahprintf(AH, "SELECT CASE WHEN EXISTS(SELECT 1 FROM pg_catalog.pg_largeobject WHERE loid = '%u') THEN pg_catalog.lo_unlink('%u') END;\n",
-				 oid, oid);
-	}
+		ahprintf(AH, "SELECT pg_catalog.lo_truncate(pg_catalog.lo_open(oid, %d), 0) "
+				 "FROM pg_catalog.pg_largeobject_metadata "
+				 "WHERE oid = %u;\n", INV_READ, oid);
 }
 
 static bool
diff --git a/src/bin/pg_dump/pg_backup_files.c b/src/bin/pg_dump/pg_backup_files.c
index 1faac0a..855d3b8 100644
--- a/src/bin/pg_dump/pg_backup_files.c
+++ b/src/bin/pg_dump/pg_backup_files.c
@@ -66,7 +66,7 @@ typedef struct
 } lclTocEntry;
 
 static const char *modulename = gettext_noop("file archiver");
-static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt);
+static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt, bool compat);
 static void _getBlobTocEntry(ArchiveHandle *AH, Oid *oid, char *fname);
 
 /*
@@ -329,8 +329,10 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
 	if (!tctx->filename)
 		return;
 
-	if (strcmp(te->desc, "BLOBS") == 0)
-		_LoadBlobs(AH, ropt);
+	if (strcmp(te->desc, "BLOBS") == 0)
+		_LoadBlobs(AH, ropt, true);
+	else if (strcmp(te->desc, "BLOB DATA") == 0)
+		_LoadBlobs(AH, ropt, ropt->dataOnly);
 	else
 		_PrintFileData(AH, tctx->filename, ropt);
 }
@@ -365,7 +367,7 @@ _getBlobTocEntry(ArchiveHandle *AH, Oid *oid, char fname[K_STD_BUF_SIZE])
 }
 
 static void
-_LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
+_LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt, bool compat)
 {
 	Oid			oid;
 	lclContext *ctx = (lclContext *) AH->formatData;
@@ -382,7 +384,7 @@ _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
 
 	while (oid != 0)
 	{
-		StartRestoreBlob(AH, oid, ropt->dropSchema);
+		StartRestoreBlob(AH, oid, ropt->dropSchema, compat);
 		_PrintFileData(AH, fname, ropt);
 		EndRestoreBlob(AH, oid);
 		_getBlobTocEntry(AH, &oid, fname);
diff --git a/src/bin/pg_dump/pg_backup_null.c b/src/bin/pg_dump/pg_backup_null.c
index 4217210..2570a84 100644
--- a/src/bin/pg_dump/pg_backup_null.c
+++ b/src/bin/pg_dump/pg_backup_null.c
@@ -147,14 +147,19 @@ _StartBlobs(ArchiveHandle *AH, TocEntry *te)
 static void
 _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 {
+	bool	compat = (strcmp(te->desc, "BLOBS") == 0 ? true : false);
+
 	if (oid == 0)
 		die_horribly(AH, NULL, "invalid OID for large object\n");
 
 	if (AH->ropt->dropSchema)
-		DropBlobIfExists(AH, oid);
+		CleanupBlobIfExists(AH, oid, compat);
 
-	ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
-			 oid, INV_WRITE);
+	if (compat)
+		ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
+				 oid, INV_WRITE);
+	else
+		ahprintf(AH, "SELECT pg_catalog.lo_open(%u, %d);\n", oid, INV_WRITE);
 
 	AH->WriteDataPtr = _WriteBlobData;
 }
@@ -195,12 +200,14 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
 	{
 		AH->currToc = te;
 
-		if (strcmp(te->desc, "BLOBS") == 0)
+		if (strcmp(te->desc, "BLOBS") == 0 ||
+			strcmp(te->desc, "BLOB DATA") == 0)
 			_StartBlobs(AH, te);
 
 		(*te->dataDumper) ((Archive *) AH, te->dataDumperArg);
 
-		if (strcmp(te->desc, "BLOBS") == 0)
+		if (strcmp(te->desc, "BLOBS") == 0 ||
+			strcmp(te->desc, "BLOB DATA") == 0)
 			_EndBlobs(AH, te);
 
 		AH->currToc = NULL;
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index 5cbc365..7f1a2c0 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -100,7 +100,7 @@ typedef struct
 
 static const char *modulename = gettext_noop("tar archiver");
 
-static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt);
+static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt, bool compat);
 
 static TAR_MEMBER *tarOpen(ArchiveHandle *AH, const char *filename, char mode);
 static void tarClose(ArchiveHandle *AH, TAR_MEMBER *TH);
@@ -695,14 +695,16 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
 		return;
 	}
 
-	if (strcmp(te->desc, "BLOBS") == 0)
-		_LoadBlobs(AH, ropt);
+	if (strcmp(te->desc, "BLOBS") == 0)
+		_LoadBlobs(AH, ropt, true);
+	else if (strcmp(te->desc, "BLOB DATA") == 0)
+		_LoadBlobs(AH, ropt, ropt->dataOnly);
 	else
 		_PrintFileData(AH, tctx->filename, ropt);
 }
 
 static void
-_LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
+_LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt, bool compat)
 {
 	Oid			oid;
 	lclContext *ctx = (lclContext *) AH->formatData;
@@ -725,7 +727,7 @@ _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
 			{
 				ahlog(AH, 1, "restoring large object OID %u\n", oid);
 
-				StartRestoreBlob(AH, oid, ropt->dropSchema);
+				StartRestoreBlob(AH, oid, ropt->dropSchema, compat);
 
 				while ((cnt = tarRead(buf, 4095, th)) > 0)
 				{
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 2db9e0f..513f110 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -190,9 +190,9 @@ static void selectSourceSchema(const char *schemaName);
 static char *getFormattedTypeName(Oid oid, OidOptions opts);
 static char *myFormatType(const char *typname, int32 typmod);
 static const char *fmtQualifiedId(const char *schema, const char *id);
-static bool hasBlobs(Archive *AH);
-static int	dumpBlobs(Archive *AH, void *arg);
-static int	dumpBlobComments(Archive *AH, void *arg);
+static void getBlobs(Archive *AH);
+static void dumpBlobItem(Archive *AH, BlobInfo *binfo);
+static int  dumpBlobData(Archive *AH, void *arg);
 static void dumpDatabase(Archive *AH);
 static void dumpEncoding(Archive *AH);
 static void dumpStdStrings(Archive *AH);
@@ -701,25 +701,8 @@ main(int argc, char **argv)
 			getTableDataFKConstraints();
 	}
 
-	if (outputBlobs && hasBlobs(g_fout))
-	{
-		/* Add placeholders to allow correct sorting of blobs */
-		DumpableObject *blobobj;
-		DumpableObject *blobcobj;
-
-		blobobj = (DumpableObject *) malloc(sizeof(DumpableObject));
-		blobobj->objType = DO_BLOBS;
-		blobobj->catId = nilCatalogId;
-		AssignDumpId(blobobj);
-		blobobj->name = strdup("BLOBS");
-
-		blobcobj = (DumpableObject *) malloc(sizeof(DumpableObject));
-		blobcobj->objType = DO_BLOB_COMMENTS;
-		blobcobj->catId = nilCatalogId;
-		AssignDumpId(blobcobj);
-		blobcobj->name = strdup("BLOB COMMENTS");
-		addObjectDependency(blobcobj, blobobj->dumpId);
-	}
+	if (outputBlobs)
+		getBlobs(g_fout);
 
 	/*
 	 * Collect dependency data to assist in ordering the objects.
@@ -1938,43 +1921,144 @@ dumpStdStrings(Archive *AH)
 
 
 /*
- * hasBlobs:
- *	Test whether database contains any large objects
+ * getBlobs:
+ *	Collect the OIDs, ownership and ACLs of all large objects
  */
-static bool
-hasBlobs(Archive *AH)
+static void
+getBlobs(Archive *AH)
 {
-	bool		result;
-	const char *blobQry;
-	PGresult   *res;
+	PQExpBuffer		blobQry = createPQExpBuffer();
+	BlobInfo	   *blobobj;
+	DumpableObject *blobdata;
+	PGresult	   *res;
+	int				i;
 
 	/* Make sure we are in proper schema */
 	selectSourceSchema("pg_catalog");
 
 	/* Check for BLOB OIDs */
-	if (AH->remoteVersion >= 80500)
+	if (AH->remoteVersion >= 90000)
-		blobQry = "SELECT oid FROM pg_largeobject_metadata LIMIT 1";
+		appendPQExpBuffer(blobQry,
+						  "SELECT oid, (%s lomowner), lomacl"
+						  " FROM pg_largeobject_metadata",
+						  username_subquery);
 	else if (AH->remoteVersion >= 70100)
-		blobQry = "SELECT loid FROM pg_largeobject LIMIT 1";
+		appendPQExpBuffer(blobQry,
+						  "SELECT DISTINCT loid, NULL, NULL"
+						  " FROM pg_largeobject");
 	else
-		blobQry = "SELECT oid FROM pg_class WHERE relkind = 'l' LIMIT 1";
+		appendPQExpBuffer(blobQry,
+						  "SELECT DISTINCT oid, NULL, NULL"
+						  " FROM pg_class WHERE relkind = 'l'");
 
-	res = PQexec(g_conn, blobQry);
-	check_sql_result(res, g_conn, blobQry, PGRES_TUPLES_OK);
+	res = PQexec(g_conn, blobQry->data);
+	check_sql_result(res, g_conn, blobQry->data, PGRES_TUPLES_OK);
+
+	/*
+	 * If there is at least one large object, we also need a
+	 * "BLOB DATA" entry to carry the data contents.
+	 */
+	if (PQntuples(res) > 0)
+	{
+		blobdata = (DumpableObject *) malloc(sizeof(DumpableObject));
+		blobdata->objType = DO_BLOB_DATA;
+		blobdata->catId = nilCatalogId;
+		AssignDumpId(blobdata);
+		blobdata->name = strdup("BLOBS");
+	}
 
-	result = PQntuples(res) > 0;
+	/*
+	 * If we don't want to dump metadata of large objects,
+	 * no need to create "BLOB ITEM" sections.
+	 */
+	if (dataOnly)
+	{
+		PQclear(res);
+		return;
+	}
+
+	/*
+	 * Each large object gets its own "BLOB ITEM" entry, which
+	 * carries its ownership, ACL and comment.
+	 */
+	for (i = 0; i < PQntuples(res); i++)
+	{
+		blobobj = (BlobInfo *) malloc(sizeof(BlobInfo));
+		blobobj->dobj.objType = DO_BLOB_ITEM;
+		blobobj->dobj.catId.oid = atooid(PQgetvalue(res, i, 0));
+		blobobj->dobj.catId.tableoid = LargeObjectRelationId;
+		AssignDumpId(&blobobj->dobj);
+
+		blobobj->dobj.name = strdup(PQgetvalue(res, i, 0));
+		blobobj->rolname = strdup(PQgetvalue(res, i, 1));
+		blobobj->blobacl = strdup(PQgetvalue(res, i, 2));
+	}
 
 	PQclear(res);
+}
 
-	return result;
+/*
+ * dumpBlobItem
+ *
+ * dump a definition of the given large object
+ */
+static void
+dumpBlobItem(Archive *AH, BlobInfo *binfo)
+{
+	PQExpBuffer		bquery;
+	PQExpBuffer		dquery;
+	PQExpBuffer		temp;
+
+	/* Skip if not to be dumped */
+	if (!binfo->dobj.dump || dataOnly)
+		return;
+
+	bquery = createPQExpBuffer();
+	dquery = createPQExpBuffer();
+	temp = createPQExpBuffer();
+
+	/*
+	 * Create an empty large object
+	 */
+	appendPQExpBuffer(bquery, "SELECT pg_catalog.lo_create('%s');\n", binfo->dobj.name);
+	appendPQExpBuffer(dquery, "SELECT pg_catalog.lo_unlink('%s');\n", binfo->dobj.name);
+
+	ArchiveEntry(AH, binfo->dobj.catId, binfo->dobj.dumpId,
+				 binfo->dobj.name,
+				 NULL, NULL,
+				 binfo->rolname, false,
+				 "BLOB ITEM", SECTION_PRE_DATA,
+				 bquery->data, dquery->data, NULL,
+				 binfo->dobj.dependencies, binfo->dobj.nDeps,
+				 NULL, NULL);
+
+	/*
+	 * Create a comment on large object, if necessary
+	 */
+	appendPQExpBuffer(temp, "LARGE OBJECT %s", binfo->dobj.name);
+	dumpComment(AH, temp->data, NULL, binfo->rolname,
+				binfo->dobj.catId, 0, binfo->dobj.dumpId);
+
+	/*
+	 * Dump access privileges, if necessary
+	 */
+	dumpACL(AH, binfo->dobj.catId, binfo->dobj.dumpId,
+			"LARGE OBJECT",
+			binfo->dobj.name, NULL,
+			binfo->dobj.name, NULL,
+			binfo->rolname, binfo->blobacl);
+
+	destroyPQExpBuffer(bquery);
+	destroyPQExpBuffer(dquery);
+	destroyPQExpBuffer(temp);
 }
 
 /*
- * dumpBlobs:
- *	dump all blobs
+ * dumpBlobData:
+ *	dump all the data contents of large object
  */
 static int
-dumpBlobs(Archive *AH, void *arg)
+dumpBlobData(Archive *AH, void *arg)
 {
 	const char *blobQry;
 	const char *blobFetchQry;
@@ -2022,7 +2106,7 @@ dumpBlobs(Archive *AH, void *arg)
 			loFd = lo_open(g_conn, blobOid, INV_READ);
 			if (loFd == -1)
 			{
-				write_msg(NULL, "dumpBlobs(): could not open large object: %s",
+				write_msg(NULL, "dumpBlobData(): could not open large object: %s",
 						  PQerrorMessage(g_conn));
 				exit_nicely();
 			}
@@ -2035,7 +2119,7 @@ dumpBlobs(Archive *AH, void *arg)
 				cnt = lo_read(g_conn, loFd, buf, LOBBUFSIZE);
 				if (cnt < 0)
 				{
-					write_msg(NULL, "dumpBlobs(): error reading large object: %s",
+					write_msg(NULL, "dumpBlobData(): error reading large object: %s",
 							  PQerrorMessage(g_conn));
 					exit_nicely();
 				}
@@ -2054,134 +2138,6 @@ dumpBlobs(Archive *AH, void *arg)
 	return 1;
 }
 
-/*
- * dumpBlobComments
- *	dump all blob properties.
- *  It has "BLOB COMMENTS" tag due to the historical reason, but note
- *  that it is the routine to dump all the properties of blobs.
- *
- * Since we don't provide any way to be selective about dumping blobs,
- * there's no need to be selective about their comments either.  We put
- * all the comments into one big TOC entry.
- */
-static int
-dumpBlobComments(Archive *AH, void *arg)
-{
-	const char *blobQry;
-	const char *blobFetchQry;
-	PQExpBuffer cmdQry = createPQExpBuffer();
-	PGresult   *res;
-	int			i;
-
-	if (g_verbose)
-		write_msg(NULL, "saving large object properties\n");
-
-	/* Make sure we are in proper schema */
-	selectSourceSchema("pg_catalog");
-
-	/* Cursor to get all BLOB comments */
-	if (AH->remoteVersion >= 80500)
-		blobQry = "DECLARE blobcmt CURSOR FOR SELECT oid, "
-			"obj_description(oid, 'pg_largeobject'), "
-			"pg_get_userbyid(lomowner), lomacl "
-			"FROM pg_largeobject_metadata";
-	else if (AH->remoteVersion >= 70300)
-		blobQry = "DECLARE blobcmt CURSOR FOR SELECT loid, "
-			"obj_description(loid, 'pg_largeobject'), NULL, NULL "
-			"FROM (SELECT DISTINCT loid FROM "
-			"pg_description d JOIN pg_largeobject l ON (objoid = loid) "
-			"WHERE classoid = 'pg_largeobject'::regclass) ss";
-	else if (AH->remoteVersion >= 70200)
-		blobQry = "DECLARE blobcmt CURSOR FOR SELECT loid, "
-			"obj_description(loid, 'pg_largeobject'), NULL, NULL "
-			"FROM (SELECT DISTINCT loid FROM pg_largeobject) ss";
-	else if (AH->remoteVersion >= 70100)
-		blobQry = "DECLARE blobcmt CURSOR FOR SELECT loid, "
-			"obj_description(loid), NULL, NULL "
-			"FROM (SELECT DISTINCT loid FROM pg_largeobject) ss";
-	else
-		blobQry = "DECLARE blobcmt CURSOR FOR SELECT oid, "
-			"	( "
-			"		SELECT description "
-			"		FROM pg_description pd "
-			"		WHERE pd.objoid=pc.oid "
-			"	), NULL, NULL "
-			"FROM pg_class pc WHERE relkind = 'l'";
-
-	res = PQexec(g_conn, blobQry);
-	check_sql_result(res, g_conn, blobQry, PGRES_COMMAND_OK);
-
-	/* Command to fetch from cursor */
-	blobFetchQry = "FETCH 100 IN blobcmt";
-
-	do
-	{
-		PQclear(res);
-
-		/* Do a fetch */
-		res = PQexec(g_conn, blobFetchQry);
-		check_sql_result(res, g_conn, blobFetchQry, PGRES_TUPLES_OK);
-
-		/* Process the tuples, if any */
-		for (i = 0; i < PQntuples(res); i++)
-		{
-			Oid			blobOid = atooid(PQgetvalue(res, i, 0));
-			char	   *lo_comment = PQgetvalue(res, i, 1);
-			char	   *lo_owner = PQgetvalue(res, i, 2);
-			char	   *lo_acl = PQgetvalue(res, i, 3);
-			char		lo_name[32];
-
-			resetPQExpBuffer(cmdQry);
-
-			/* comment on the blob */
-			if (!PQgetisnull(res, i, 1))
-			{
-				appendPQExpBuffer(cmdQry,
-								  "COMMENT ON LARGE OBJECT %u IS ", blobOid);
-				appendStringLiteralAH(cmdQry, lo_comment, AH);
-				appendPQExpBuffer(cmdQry, ";\n");
-			}
-
-			/* dump blob ownership, if necessary */
-			if (!PQgetisnull(res, i, 2))
-			{
-				appendPQExpBuffer(cmdQry,
-								  "ALTER LARGE OBJECT %u OWNER TO %s;\n",
-								  blobOid, lo_owner);
-			}
-
-			/* dump blob privileges, if necessary */
-			if (!PQgetisnull(res, i, 3) &&
-				!dataOnly && !aclsSkip)
-			{
-				snprintf(lo_name, sizeof(lo_name), "%u", blobOid);
-				if (!buildACLCommands(lo_name, NULL, "LARGE OBJECT",
-									  lo_acl, lo_owner, "",
-									  AH->remoteVersion, cmdQry))
-				{
-					write_msg(NULL, "could not parse ACL (%s) for "
-							  "large object %u", lo_acl, blobOid);
-					exit_nicely();
-				}
-			}
-
-			if (cmdQry->len > 0)
-			{
-				appendPQExpBuffer(cmdQry, "\n");
-				archputs(cmdQry->data, AH);
-			}
-		}
-	} while (PQntuples(res) > 0);
-
-	PQclear(res);
-
-	archputs("\n", AH);
-
-	destroyPQExpBuffer(cmdQry);
-
-	return 1;
-}
-
 static void
 binary_upgrade_set_type_oids_by_type_oid(PQExpBuffer upgrade_buffer,
 											   Oid pg_type_oid)
@@ -6524,21 +6480,24 @@ dumpDumpableObject(Archive *fout, DumpableObject *dobj)
 		case DO_DEFAULT_ACL:
 			dumpDefaultACL(fout, (DefaultACLInfo *) dobj);
 			break;
-		case DO_BLOBS:
-			ArchiveEntry(fout, dobj->catId, dobj->dumpId,
-						 dobj->name, NULL, NULL, "",
-						 false, "BLOBS", SECTION_DATA,
-						 "", "", NULL,
-						 dobj->dependencies, dobj->nDeps,
-						 dumpBlobs, NULL);
+		case DO_BLOB_ITEM:
+			dumpBlobItem(fout, (BlobInfo *) dobj);
 			break;
-		case DO_BLOB_COMMENTS:
+		case DO_BLOB_DATA:
+			/*
+			 * With --data-only, pg_dump emits no DO_BLOB_ITEM entries
+			 * (they carry the large objects' metadata: ownership, access
+			 * privileges and comments).
+			 * In that case, mark the DO_BLOB_DATA entry as a legacy
+			 * "BLOBS" section so that pg_restore creates each large
+			 * object just before loading its data.
+			 */
 			ArchiveEntry(fout, dobj->catId, dobj->dumpId,
-						 dobj->name, NULL, NULL, "",
-						 false, "BLOB COMMENTS", SECTION_DATA,
-						 "", "", NULL,
+						 dobj->name, NULL, NULL, "", false,
+						 !dataOnly ? "BLOB DATA" : "BLOBS",
+						 SECTION_DATA, "", "", NULL,
 						 dobj->dependencies, dobj->nDeps,
-						 dumpBlobComments, NULL);
+						 dumpBlobData, NULL);
 			break;
 	}
 }
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 1e65fac..ccf7348 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -115,8 +115,8 @@ typedef enum
 	DO_FDW,
 	DO_FOREIGN_SERVER,
 	DO_DEFAULT_ACL,
-	DO_BLOBS,
-	DO_BLOB_COMMENTS
+	DO_BLOB_DATA,
+	DO_BLOB_ITEM
 } DumpableObjectType;
 
 typedef struct _dumpableObject
@@ -443,6 +443,13 @@ typedef struct _defaultACLInfo
 	char	   *defaclacl;
 } DefaultACLInfo;
 
+typedef struct _blobInfo
+{
+	DumpableObject	dobj;
+	char	   *rolname;
+	char	   *blobacl;
+} BlobInfo;
+
 /* global decls */
 extern bool force_quotes;		/* double-quotes for identifiers flag */
 extern bool g_verbose;			/* verbose flag */
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index 6676baf..be98c81 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -92,8 +92,8 @@ static const int newObjectTypePriority[] =
 	14,							/* DO_FDW */
 	15,							/* DO_FOREIGN_SERVER */
 	27,							/* DO_DEFAULT_ACL */
-	20,							/* DO_BLOBS */
-	21							/* DO_BLOB_COMMENTS */
+	21,							/* DO_BLOB_DATA */
+	20,							/* DO_BLOB_ITEM */
 };
 
 
@@ -1146,14 +1146,14 @@ describeDumpableObject(DumpableObject *obj, char *buf, int bufsize)
 					 "DEFAULT ACL %s  (ID %d OID %u)",
 					 obj->name, obj->dumpId, obj->catId.oid);
 			return;
-		case DO_BLOBS:
+		case DO_BLOB_DATA:
 			snprintf(buf, bufsize,
-					 "BLOBS  (ID %d)",
+					 "BLOB DATA  (ID %d)",
 					 obj->dumpId);
 			return;
-		case DO_BLOB_COMMENTS:
+		case DO_BLOB_ITEM:
 			snprintf(buf, bufsize,
-					 "BLOB COMMENTS  (ID %d)",
+					 "BLOB ITEM  (ID %d)",
 					 obj->dumpId);
 			return;
 	}
