CREATE TYPE sys.ku$_JobDesc1020 IS OBJECT (
job_name VARCHAR2(30), -- The job name
guid RAW(16), -- The job GUID
operation VARCHAR2(30), -- Current operation
job_mode VARCHAR2(30), -- Current mode
remote_link VARCHAR2(4000), -- DB link, if any
owner VARCHAR2(30), -- Job owner
platform VARCHAR2(101), -- Current job platform
exp_platform VARCHAR2(101), -- Export platform
global_name VARCHAR2(4000), -- Current global name
exp_global_name VARCHAR2(4000), -- Export global name
instance VARCHAR2(16), -- The instance name
db_version VARCHAR2(30), -- Version of objects
exp_db_version VARCHAR2(30), -- Export version
scn NUMBER, -- Job SCN
creator_privs VARCHAR2(30), -- Privs of job
start_time DATE, -- This job start time
exp_start_time DATE, -- Export start time
term_reason NUMBER, -- Job termination code
max_degree NUMBER, -- Max. parallelism
log_file VARCHAR2(4000), -- Log file name
sql_file VARCHAR2(4000), -- SQL file name
params ku$_ParamValues1010) -- Parameter list
/
CREATE TYPE sys.ku$_JobDesc1210 IS OBJECT (
job_name VARCHAR2(30), -- The job name
guid RAW(16), -- The job GUID
operation VARCHAR2(30), -- Current operation
job_mode VARCHAR2(30), -- Current mode
remote_link VARCHAR2(4000), -- DB link, if any
owner VARCHAR2(30), -- Job owner
platform VARCHAR2(101), -- Current job platform
exp_platform VARCHAR2(101), -- Export platform
global_name VARCHAR2(4000), -- Current global name
exp_global_name VARCHAR2(4000), -- Export global name
instance VARCHAR2(16), -- The instance name
db_version VARCHAR2(30), -- Cur. server software version
exp_db_version VARCHAR2(30), -- Export svr. software version
job_version VARCHAR2(30), -- Negotiated data version
scn NUMBER, -- Job SCN
creator_privs VARCHAR2(30), -- Privs of job
start_time DATE, -- This job start time
exp_start_time DATE, -- Export start time
term_reason NUMBER, -- Job termination code
max_degree NUMBER, -- Max. parallelism
timezone VARCHAR2(64), -- Cur. server timezone
exp_timezone VARCHAR2(64), -- Exp. server timezone
tstz_version NUMBER, -- Cur. server timezone version
exp_tstz_version NUMBER, -- Exp. server timezone
endianness VARCHAR2(16), -- Cur. platform's endianness
exp_endianness VARCHAR2(16), -- Exp. platform's endianness ('BIG' or 'LITTLE')
charset VARCHAR2(28), -- Cur. server charset
exp_charset VARCHAR2(28), -- Exp. server charset
ncharset VARCHAR2(28), -- Cur. server national charset
exp_ncharset VARCHAR2(28), -- Exp. server national charset
log_file VARCHAR2(4000), -- Log file name
sql_file VARCHAR2(4000), -- SQL file name
params ku$_ParamValues1010) -- Parameter list
/
DumpFile Types
CREATE TYPE sys.ku$_DumpFile1010 AS OBJECT (
file_name VARCHAR2(4000),
file_type NUMBER,
file_size NUMBER,
file_bytes_written NUMBER);
/
CREATE TYPE sys.ku$_DumpFileSet1010 AS TABLE OF sys.ku$_DumpFile1010;
/
CREATE TYPE sys.ku$_dumpfile_item IS OBJECT (
item_code NUMBER, -- Identifies header item
value VARCHAR2(2048)); -- Text string value
/
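A minimal sketch of how ku$_dumpfile_item values surface in practice: dbms_datapump.get_dumpfile_info reads a dump file header and returns a collection of these items. The file name and the EXPIMP directory below are assumptions for illustration.
set serveroutput on
DECLARE
 info sys.ku$_dumpfile_info; -- table of sys.ku$_dumpfile_item
 ftype NUMBER;
BEGIN
 -- 'example5.dmp' in directory EXPIMP is an illustrative file only
 dbms_datapump.get_dumpfile_info('example5.dmp', 'EXPIMP', info, ftype);
 dbms_output.put_line('File type: ' || ftype);
 FOR i IN 1 .. info.COUNT LOOP
   dbms_output.put_line('Item ' || info(i).item_code || ' = ' || info(i).value);
 END LOOP;
END;
/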
Executable-Package Equivalencies
The examples below were written by Richard Harrison, who invested substantial effort in putting them together; they are republished here with his permission.
The original post can be found here.
Each executable (expdp/impdp) parameter below is followed by its dbms_datapump package equivalent(s):
ACCESS_METHOD
dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'AUTOMATIC');
dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'DIRECT_PATH');
dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'EXTERNAL_TABLE');
COMPRESSION
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'COMPRESSION', value => 'ALL');
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'COMPRESSION', value => 'DATA_ONLY');
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'COMPRESSION', value => 'METADATA_ONLY');
CONTENT
dbms_datapump.set_parameter(handle => h1, name => 'INCLUDE_METADATA', value => 1); -- ALL
dbms_datapump.set_parameter(handle => h1, name => 'INCLUDE_METADATA', value => 0); -- DATA_ONLY
dbms_datapump.data_filter(handle => h1, name => 'INCLUDE_ROWS', value => 0); -- METADATA_ONLY
DATA_OPTIONS
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'DATA_OPTIONS', value => dbms_datapump.KU$_DATAOPT_XMLTYPE_CLOB);
ENCRYPTION
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'ALL');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'DATA_ONLY');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'ENCRYPTED_COLUMNS_ONLY');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'METADATA_ONLY');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'NONE');
ENCRYPTION_ALGORITHM
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_ALGORITHM', value => 'AES128');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_ALGORITHM', value => 'AES192');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_ALGORITHM', value => 'AES256');
ENCRYPTION_MODE
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_MODE', value => 'DUAL');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_MODE', value => 'PASSWORD');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_MODE', value => 'TRANSPARENT');
ENCRYPTION_PASSWORD
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_PASSWORD', value => 'secret password');
ESTIMATE
dbms_datapump.set_parameter(handle => h1, name => 'ESTIMATE', value => 'BLOCKS');
ESTIMATE_ONLY
dbms_datapump.set_parameter(handle => h1, name => 'ESTIMATE_ONLY', value => 1);
FLASHBACK_SCN
dbms_datapump.set_parameter(handle => h1, name => 'FLASHBACK_SCN', value => nnnnnnnn);
dbms_datapump.set_parameter(handle => h1, name => 'FLASHBACK_SCN', value => TIMESTAMP_TO_SCN(TO_TIMESTAMP(TO_CHAR(SYSDATE))));
FLASHBACK_TIME
dbms_datapump.set_parameter(handle => h1, name => 'FLASHBACK_TIME', value => 'TO_TIMESTAMP(TO_CHAR(SYSDATE))');
KEEP_MASTER
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'KEEP_MASTER', value => 1);
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'KEEP_MASTER', value => 0);
METRICS
dbms_datapump.set_parameter(handle => h1, name => 'METRICS', value => 0);
dbms_datapump.set_parameter(handle => h1, name => 'METRICS', value => 1);
TRANSPORTABLE
dbms_datapump.set_parameter(handle => h1, name => 'TRANSPORTABLE', value => 'ALWAYS');
dbms_datapump.set_parameter(handle => h1, name => 'TRANSPORTABLE', value => 'NEVER');
TRANSPORT_FULL_CHECK
dbms_datapump.set_parameter(handle => h1, name => 'TTS_FULL_CHECK', value => 0); -- N
dbms_datapump.set_parameter(handle => h1, name => 'TTS_FULL_CHECK', value => 1); -- Y
USER_METADATA
dbms_datapump.set_parameter(handle => h1, name => 'USER_METADATA', value => 1); -- export CREATE USER statements in schema mode (requires the DATAPUMP_EXP_FULL_DATABASE role)
dbms_datapump.set_parameter(handle => h1, name => 'USER_METADATA', value => 0); -- do not export CREATE USER statements in schema mode
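For context, a minimal sketch (assumptions: a SCHEMA-mode export, handle h1, an EXPIMP directory object, and an illustrative dump file name) showing where calls like those above sit inside a job definition:
DECLARE
 h1 NUMBER;
BEGIN
 h1 := dbms_datapump.open('EXPORT', 'SCHEMA');
 dbms_datapump.add_file(h1, 'equiv_demo.dmp', 'EXPIMP');          -- hypothetical file name
 dbms_datapump.set_parameter(h1, 'COMPRESSION', 'METADATA_ONLY'); -- COMPRESSION=METADATA_ONLY
 dbms_datapump.set_parameter(h1, 'INCLUDE_METADATA', 1);          -- CONTENT=ALL
 dbms_datapump.start_job(h1);
 dbms_datapump.detach(h1);
END;
/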
CREATE TYPE sys.ku$_JobStatus1020 IS OBJECT (
job_name VARCHAR2(30), -- Name of the job
operation VARCHAR2(30), -- Current operation
job_mode VARCHAR2(30), -- Current mode
bytes_processed NUMBER, -- Bytes so far
total_bytes NUMBER, -- Total bytes for job
percent_done NUMBER, -- Percent done
degree NUMBER, -- Of job parallelism
error_count NUMBER, -- #errors so far
state VARCHAR2(30), -- Current job state
phase NUMBER, -- Job phase
restart_count NUMBER, -- #Job restarts
worker_status_list ku$_WorkerStatusList1020, -- For (non-idle) job worker processes
files ku$_DumpFileSet1010); -- Dump file info
/
CREATE TYPE sys.ku$_JobStatus1120 IS OBJECT (
job_name VARCHAR2(30), -- Name of the job
operation VARCHAR2(30), -- Current operation
job_mode VARCHAR2(30), -- Current mode
bytes_processed NUMBER, -- Bytes so far
total_bytes NUMBER, -- Total bytes for job
percent_done NUMBER, -- Percent done
degree NUMBER, -- Of job parallelism
error_count NUMBER, -- #errors so far
state VARCHAR2(30), -- Current job state
phase NUMBER, -- Job phase
restart_count NUMBER, -- #Job restarts
worker_status_list ku$_WorkerStatusList1120, -- For (non-idle) job worker processes
files ku$_DumpFileSet1010); -- Dump file info
/
CREATE TYPE sys.ku$_JobStatus1210 IS OBJECT
(
job_name VARCHAR2(30), -- Name of the job
operation VARCHAR2(30), -- Current operation
job_mode VARCHAR2(30), -- Current mode
bytes_processed NUMBER, -- Bytes so far
total_bytes NUMBER, -- Total bytes for job
percent_done NUMBER, -- Percent done
degree NUMBER, -- Of job parallelism
error_count NUMBER, -- #errors so far
state VARCHAR2(30), -- Current job state
phase NUMBER, -- Job phase
restart_count NUMBER, -- #Job restarts
worker_status_list ku$_WorkerStatusList1210, -- For (non-idle) job worker processes
files ku$_DumpFileSet1010) -- Dump file info
/
CREATE TYPE sys.ku$_Status1210 IS OBJECT (
mask NUMBER, -- Status types present
wip ku$_LogEntry1010, -- Work in progress
job_description ku$_JobDesc1210, -- Complete job description
job_status ku$_JobStatus1210, -- Detailed job status
error ku$_LogEntry1010) -- Multi-level context errors
/
Log Entry & Error Types
CREATE TYPE sys.ku$_LogLine1010 AS OBJECT (
logLineNumber NUMBER,
errorNumber NUMBER,
LogText VARCHAR2(2000));
/
CREATE PUBLIC SYNONYM ku$_LogLine1010
FOR sys.ku$_LogLine1010;
CREATE TYPE sys.ku$_LogEntry1010 AS TABLE OF
sys.ku$_LogLine1010;
/
CREATE PUBLIC SYNONYM ku$_LogEntry1010
FOR sys.ku$_LogEntry1010;
Status Types
CREATE TYPE sys.ku$_status1010 AS OBJECT (
mask NUMBER,
wip ku$_LogEntry1010,
job_description ku$_JobDesc1010,
job_status ku$_JobStatus1010,
error ku$_LogEntry1010);
/
CREATE TYPE sys.ku$_Status1020 IS OBJECT (
mask NUMBER, -- Status types present
wip ku$_LogEntry1010, -- Work in progress
job_description ku$_JobDesc1020, -- Complete job description
job_status ku$_JobStatus1020, -- Detailed job status
error ku$_LogEntry1010) -- Multi-level context errors
/
CREATE TYPE sys.ku$_Status1120 IS OBJECT (
mask NUMBER, -- Status types present
wip ku$_LogEntry1010, -- Work in progress
job_description ku$_JobDesc1020, -- Complete job description
job_status ku$_JobStatus1120, -- Detailed job status
error ku$_LogEntry1010) -- Multi-level context errors
/
Worker Status Types
CREATE TYPE sys.ku$_WorkerStatus1010 AS OBJECT (
worker_number NUMBER,
process_name VARCHAR2(30),
state VARCHAR2(30),
schema VARCHAR2(30),
name VARCHAR2(4000),
object_type VARCHAR2(200),
partition VARCHAR2(30),
completed_objects NUMBER,
total_objects NUMBER,
completed_rows NUMBER,
completed_bytes NUMBER,
percent_done NUMBER)
/
CREATE TYPE sys.ku$_WorkerStatus1020 AS OBJECT (
worker_number NUMBER, -- Worker process identifier
process_name VARCHAR2(30), -- Worker process name
state VARCHAR2(30), -- Worker process state
schema VARCHAR2(30), -- Schema name
name VARCHAR2(4000), -- Object name
object_type VARCHAR2(200), -- Object type
partition VARCHAR2(30), -- Partition name
completed_objects NUMBER, -- Completed number of objects
total_objects NUMBER, -- Total number of objects
completed_rows NUMBER, -- Number of rows completed
completed_bytes NUMBER, -- Number of bytes completed
percent_done NUMBER, -- Percent done current object
degree NUMBER); -- Degree of parallelism
/
CREATE TYPE sys.ku$_WorkerStatus1120 AS OBJECT (
worker_number NUMBER, -- Worker process identifier
process_name VARCHAR2(30), -- Worker process name
state VARCHAR2(30), -- Worker process state
schema VARCHAR2(30), -- Schema name
name VARCHAR2(4000), -- Object name
object_type VARCHAR2(200), -- Object type
partition VARCHAR2(30), -- Partition name
completed_objects NUMBER, -- Completed number of objects
total_objects NUMBER, -- Total number of objects
completed_rows NUMBER, -- Number of rows completed
completed_bytes NUMBER, -- Number of bytes completed
percent_done NUMBER, -- Percent done current object
degree NUMBER, -- Degree of parallelism
instance_id NUMBER); -- Instance ID where running
/
CREATE TYPE sys.ku$_WorkerStatus1210 AS OBJECT
(
worker_number NUMBER, -- Worker process identifier
process_name VARCHAR2(30), -- Worker process name
state VARCHAR2(30), -- Worker process state
schema VARCHAR2(30), -- Schema name
name VARCHAR2(4000), -- Object name
object_type VARCHAR2(200), -- Object type
partition VARCHAR2(30), -- Partition name
completed_objects NUMBER, -- Completed number of objects
total_objects NUMBER, -- Total number of objects
completed_rows NUMBER, -- Number of rows completed
completed_bytes NUMBER, -- Number of bytes completed
percent_done NUMBER, -- Percent done current object
degree NUMBER, -- Degree of parallelism
instance_id NUMBER, -- Instance ID where running
instance_name VARCHAR2(60), -- Instance Name where running
host_name VARCHAR2(64)); -- Host name where running
/
CREATE TYPE sys.ku$_WorkerStatusList1010 AS TABLE OF sys.ku$_WorkerStatus1010
/
CREATE TYPE sys.ku$_WorkerStatusList1020 AS TABLE OF sys.ku$_WorkerStatus1020
/
CREATE TYPE sys.ku$_WorkerStatusList1120 AS TABLE OF sys.ku$_WorkerStatus1120
/
CREATE TYPE sys.ku$_WorkerStatusList1210 AS TABLE OF sys.ku$_WorkerStatus1210
/
Dependencies
SELECT name FROM dba_dependencies WHERE referenced_name = 'DBMS_DATAPUMP'
UNION
SELECT referenced_name FROM dba_dependencies WHERE name = 'DBMS_DATAPUMP'
ORDER BY 1;
Exceptions
INVALID_ARGVAL (ORA-39001): An invalid value was supplied for an input parameter
INVALID_OPERATION (ORA-39002): Cannot be executed because of inconsistencies between the API and the job
INVALID_STATE (ORA-39004): The state of the job precludes the execution of the API
INCONSISTENT_ARGS (ORA-39005): Inconsistent arguments
INTERNAL_ERROR (ORA-39006): Internal Data Pump exception
INVALID_HANDLE (ORA-31623): Incorrect handle specified for the job
NO_SUCH_JOB (ORA-31626): An invalid reference to a job which is no longer executing
SUCCESS_WITH_INFO (ORA-31627): User-specified job parameters yielded informational messages
PRIVILEGE_ERROR (ORA-31631): The necessary privileges are not available for the operation
JOB_EXISTS (ORA-31634): Job creation or restart failed due to a duplicate name
NO_DUMPFILE_INFO (ORA-39211): User specified an invalid or inaccessible file
WARNING_DV_NOENCRYPT (ORA-39327): Database Vault data is being stored unencrypted in the dump file set
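A hedged sketch of trapping one of these errors by code; ORA-31634 (JOB_EXISTS) is raised when the job name is already in use, and the job name below is illustrative:
DECLARE
 h1 NUMBER;
BEGIN
 h1 := dbms_datapump.open('EXPORT', 'SCHEMA', NULL, 'NIGHTLY_EXPORT');
EXCEPTION
 WHEN OTHERS THEN
   IF SQLCODE = -31634 THEN -- JOB_EXISTS
     dbms_output.put_line('A job named NIGHTLY_EXPORT already exists');
   ELSE
     RAISE;
   END IF;
END;
/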
Filters
Name
Object Type
Meaning
INCLUDE_NAME_EXPR and EXCLUDE_NAME_EXPR
Named objects
Defines which object names are included in (INCLUDE_NAME_EXPR), or excluded from (EXCLUDE_NAME_EXPR), the job.
You use the object_type parameter to limit the filter to a particular object type.
For Table mode, identifies which tables are to be processed.
SCHEMA_EXPR
Schema objects
Restricts the job to objects whose owning schema name is satisfied by the expression.
For Table mode, only a single SCHEMA_EXPR filter is supported. If specified, it must only specify a single schema (for example, 'IN (''SCOTT'')').
For Schema mode, identifies which users are to be processed.
TABLESPACE_EXPR
Table, Cluster, Index, Rollback Segment
Restricts the job to objects stored in a tablespace whose name is satisfied by the expression.
For Tablespace mode, identifies which tablespaces are to be processed. If a partition of an object is stored in the tablespace, the entire object is added to the job.
For Transportable mode, identifies which tablespaces are to be processed. If a table has a single partition in the tablespace set, all partitions must be in the tablespace set.
An index is not included within the tablespace set unless all of its partitions are in the tablespace set.
A domain index is not included in the tablespace set unless all of its secondary objects are included in the tablespace set.
INCLUDE_PATH_EXPR and EXCLUDE_PATH_EXPR
All
Defines which object paths are included in, or excluded from, the job.
You use these filters to select only certain object types from the database or dump file set.
Objects of paths satisfying the condition are included (INCLUDE_PATH_EXPR) or excluded (EXCLUDE_PATH_EXPR) from the operation. The object_path parameter is not supported for these filters.
First Available: 10.1
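A minimal sketch of applying one of these filters with metadata_filter; h1 is assumed to be a handle returned by dbms_datapump.open, and the name pattern is illustrative:
dbms_datapump.metadata_filter(
 handle => h1,
 name => 'INCLUDE_NAME_EXPR',
 value => 'LIKE ''SRV%''',
 object_path => 'TABLE');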
Remaps
Name
Data Type
Object Type
Meaning
REMAP_DATAFILE
Text
Library, Tablespace, Directory
Any datafile reference in the job that matches the object_type parameter and referenced the old_value datafile will be redefined to use the value datafile.
REMAP_SCHEMA
Text
Schema Objects
Any schema object in the job that matches the object_type parameter and was located in the old_value schema will be moved to the value schema.
REMAP_TABLESPACE
Text
Storage segments
Any storage segment in the job that matches the object_type parameter and was located in the old_value tablespace will be relocated to the value tablespace.
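These remaps are applied with dbms_datapump.metadata_remap. A minimal sketch, assuming an import job handle h1 and illustrative schema names:
dbms_datapump.metadata_remap(
 handle => h1,
 name => 'REMAP_SCHEMA',
 old_value => 'UWCLASS',
 value => 'UWCLASS_DEV');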
Adds a sequential device to the dump file set for Export, Import, or SQL_FILE operations
dbms_datapump.add_device(
handle IN NUMBER, -- job handle
device_name IN VARCHAR2, -- name of device being added
volume_size IN VARCHAR2 DEFAULT NULL); -- device storage capacity
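A hedged usage sketch; h1 is assumed to be an open job handle and the device name is hypothetical:
dbms_datapump.add_device(
 handle => h1,
 device_name => '/dev/rmt0',
 volume_size => '100G');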
Adds files to the dump file set for an Export, Import, or SQL_FILE operation or specifies the log file or the output file for a SQL_FILE operation
dbms_datapump.add_file(
handle IN NUMBER,
filename IN VARCHAR2,
directory IN VARCHAR2 DEFAULT NULL,
filesize IN VARCHAR2 DEFAULT NULL,
filetype IN NUMBER DEFAULT dbms_datapump.ku$_file_type_dump_file,
reusefile IN NUMBER DEFAULT NULL);
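A minimal sketch adding a dump file and a log file; h1 is an assumed job handle, the file names are examples, and EXPIMP/EXPIMP_LOG are the directory objects created in the demo at the bottom of this page:
dbms_datapump.add_file(
 handle => h1,
 filename => 'schema_exp%U.dmp',
 directory => 'EXPIMP',
 filesize => '500M',
 filetype => dbms_datapump.ku$_file_type_dump_file,
 reusefile => 1); -- overwrite an existing file of the same name

dbms_datapump.add_file(
 handle => h1,
 filename => 'schema_exp.log',
 directory => 'EXPIMP_LOG',
 filetype => dbms_datapump.ku$_file_type_log_file);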
dbms_datapump.create_job_view(
job_schema IN VARCHAR2,
job_name IN VARCHAR2,
view_name IN VARCHAR2,
view_type IN VARCHAR2 DEFAULT dbms_datapump.ku$_job_view_all);
TBD
Overload 2
dbms_datapump.create_job_view(
handle IN NUMBER,
view_name IN VARCHAR2,
view_type IN VARCHAR2 DEFAULT dbms_datapump.ku$_job_view_all);
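A hedged sketch of the handle-based overload; h1 is an assumed job handle and the view name is hypothetical:
dbms_datapump.create_job_view(
 handle => h1,
 view_name => 'MY_DP_JOB_VIEW',
 view_type => dbms_datapump.ku$_job_view_all);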
Specifies restrictions on the rows that are to be retrieved
Overload 1
dbms_datapump.data_filter (
handle IN NUMBER,
name IN VARCHAR2, -- filter name
value IN NUMBER,
table_name IN VARCHAR2 DEFAULT NULL, -- if not specified = all
schema_name IN VARCHAR2 DEFAULT NULL);
TBD
Overload 2
dbms_datapump.data_filter(
handle IN NUMBER,
name IN VARCHAR2,
value IN CLOB,
table_name IN VARCHAR2 DEFAULT NULL,
schema_name IN VARCHAR2 DEFAULT NULL);
TBD
Overload 3
dbms_datapump.data_filter(
handle IN NUMBER,
name IN VARCHAR2,
value IN VARCHAR2,
table_name IN VARCHAR2 DEFAULT NULL,
schema_name IN VARCHAR2 DEFAULT NULL);
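A minimal sketch using the VARCHAR2 overload with the SUBQUERY filter to restrict exported rows; h1 is an assumed job handle, and the table, schema, and predicate are illustrative:
dbms_datapump.data_filter(
 handle => h1,
 name => 'SUBQUERY',
 value => 'WHERE rownum <= 1000',
 table_name => 'SERVERS',
 schema_name => 'UWCLASS');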
dbms_datapump.data_remap(
handle IN NUMBER, -- job handle
name IN VARCHAR2,
table_name IN VARCHAR2,
column IN VARCHAR2,
function IN VARCHAR2,
schema IN VARCHAR2 DEFAULT NULL,
remap_flags IN NUMBER DEFAULT 0); -- alt. value dbms_datapump.ku$_data_remap_with_rowid
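A hedged sketch using the COLUMN_FUNCTION remap to mask a column on export; h1 is an assumed job handle, and the table, column, and masking function are hypothetical:
dbms_datapump.data_remap(
 handle => h1,
 name => 'COLUMN_FUNCTION',
 table_name => 'SERVERS',
 column => 'ADMIN_EMAIL',
 function => 'uwclass.mask_email',
 schema => 'UWCLASS');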
Returns information about an object from the master table
dbms_datapump.get_object_info(
object_owner IN VARCHAR2,
object_name IN VARCHAR2,
object_type IN VARCHAR2,
processing_status OUT VARCHAR2,
processing_state OUT VARCHAR2);
DECLARE
pstatus VARCHAR2(30);
pstate VARCHAR2(30);
BEGIN
dbms_datapump.get_object_info('UWCLASS', 'SERVERS', 'TABLE', pstatus, pstate);
dbms_output.put_line(pstatus);
dbms_output.put_line(pstate);
END;
/
ORA-31623: a job is not attached to this session via the specified handle
dbms_datapump.log_error(
handle IN NUMBER,
message IN VARCHAR2,
error_number IN NUMBER DEFAULT 0,
fatal_error IN NUMBER DEFAULT 0,
log_file_only IN NUMBER DEFAULT 0);
Creates a filter that restricts the items that are included in a job
Overload 1
dbms_datapump.metadata_filter (
handle IN NUMBER,
name IN VARCHAR2,
value IN VARCHAR2,
object_path IN VARCHAR2 DEFAULT NULL,
object_type IN VARCHAR2 DEFAULT NULL);
Filter names and values (valid for both overloads)
Filter Name
Filter Values
Data Type
Applicable Object Types
NAME_EXPR
A single named object
TEXT
All
NAME_LIST
A comma-separated list of objects
TEXT
All
SCHEMA_EXPR
A single named schema
TEXT
Schema
SCHEMA_LIST
A comma-separated list of schemas
TEXT
Schemas
TABLESPACE_EXPR
A single named tablespace
TEXT
Table, Cluster, Index
TABLESPACE_LIST
A comma-separated list of tablespaces
TEXT
Table, Cluster, Index
INCLUDE_PATH_EXPR
A single object path
TEXT
All
INCLUDE_PATH_LIST
A comma-separated list of object paths
TEXT
All
EXCLUDE_PATH_EXPR
A single object path
TEXT
All
EXCLUDE_PATH_LIST
A comma-separated list of object paths
TEXT
All
EXCLUDE_TABLES
A comma-separated list of tables to be excluded (not exported)
TEXT
Table Export
TABLES_AS_VIEWS
A comma-separated list of views to be exported as tables
TEXT
Table Export
See Export Demo Below
Overload 2
dbms_datapump.metadata_filter (
handle IN NUMBER,
name IN VARCHAR2,
value IN CLOB,
object_path IN VARCHAR2 DEFAULT NULL,
object_type IN VARCHAR2 DEFAULT NULL);
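The CLOB overload is useful when a generated IN-list would exceed VARCHAR2 limits. A minimal sketch, assuming an open job handle h1 and illustrative table names:
dbms_datapump.metadata_filter(
 handle => h1,
 name => 'NAME_EXPR',
 value => TO_CLOB('IN (''T1'',''T2'',''T3'')'), -- in practice, a long generated list
 object_path => 'TABLE');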
Specifies transformations to be applied to objects as they are processed in the specified job
Overload 1
dbms_datapump.metadata_transform (
handle IN NUMBER,
name IN VARCHAR2,
value IN VARCHAR2,
object_type IN VARCHAR2 DEFAULT NULL);
Transform names and values (valid for both overloads)
Transform Name
Transform Values
Data Type
Application
DISABLE_ARCHIVE_LOGGING
N or Y
TEXT
TABLE, INDEX
LOB_STORAGE
BASICFILE, DEFAULT, NO_CHANGE, SECUREFILE
TEXT
TABLE
OID
0 or 1: The default = 1
NUMBER
TYPE TABLE
PCTSPACE
Percentage multiplier for extent allocations and datafile sizes: Default = 100
NUMBER
TABLE, INDEX, TABLESPACE
SEGMENT_ATTRIBUTES
If nonzero, includes segment attributes (physical attributes, storage attributes, tablespace, logging): Default = 1
NUMBER
TABLE, INDEX
SEGMENT_CREATION
If non-zero includes the segment creation clause: Default = 1
NUMBER
TABLE
STORAGE
If nonzero, includes the storage clause (ignored when SEGMENT_ATTRIBUTES = 0): Default = 1
NUMBER
TABLE
TABLE_COMPRESSION_CLAUSE
Specify NONE to omit the compression clause; otherwise supply a valid table compression clause
TEXT
TABLE
The list of object types can be found in the DATABASE_EXPORT_OBJECTS, SCHEMA_EXPORT_OBJECTS, TABLE_EXPORT_OBJECTS, and the TABLESPACE_EXPORT_OBJECTS views.
TBD
Overload 2
dbms_datapump.metadata_transform (
handle IN NUMBER,
name IN VARCHAR2,
value IN NUMBER,
object_type IN VARCHAR2 DEFAULT NULL);
The list of object types can be found in the DATABASE_EXPORT_OBJECTS, SCHEMA_EXPORT_OBJECTS, TABLE_EXPORT_OBJECTS, and the TABLESPACE_EXPORT_OBJECTS views.
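A minimal sketch of two common import-side transforms, assuming an open import job handle h1; the PCTSPACE value is illustrative:
dbms_datapump.metadata_transform(handle => h1, name => 'SEGMENT_ATTRIBUTES', value => 0); -- strip physical/storage attributes
dbms_datapump.metadata_transform(handle => h1, name => 'PCTSPACE', value => 70, object_type => 'TABLE'); -- shrink extent allocations to 70%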
Declares a new job using the Data Pump API. The handle that is returned is used as a parameter for calls to all other procedures except ATTACH.
dbms_datapump.open (
operation IN VARCHAR2,
job_mode IN VARCHAR2,
remote_link IN VARCHAR2 DEFAULT NULL,
job_name IN VARCHAR2 DEFAULT NULL,
version IN VARCHAR2 DEFAULT 'COMPATIBLE',
compression IN NUMBER DEFAULT dbms_datapump.ku$_compress_metadata)
RETURN NUMBER;
Job Mode
Description
FULL
Full database excluding the SYS, XDB, ORDSYS, MDSYS, CTXSYS, ORDPLUGINS, and LBACSYS schemas
SCHEMA
Operates on a set of selected schemas. Defaults to current user.
All objects in the selected schemas are processed. Users cannot specify the schemas noted above.
TABLE
Operates on a set of selected tables.
Defaults to all of the tables in the current user's schema. Only tables and their dependent objects are processed.
TABLESPACE
Operates on a set of selected tablespaces.
No defaulting is performed. Tables that have storage in the specified tablespaces are processed in the same manner as in Table mode.
TRANSPORTABLE
Operates on metadata for tables (and their dependent objects)
within a set of selected tablespaces to perform a transportable tablespace export/import.
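A minimal sketch of opening a TABLE-mode job; the job name is hypothetical, and a real job would go on to call add_file, metadata_filter, and start_job before detaching:
DECLARE
 h1 NUMBER;
BEGIN
 h1 := dbms_datapump.open(
  operation => 'EXPORT',
  job_mode => 'TABLE',
  job_name => 'TABLE_EXP_DEMO');
 dbms_output.put_line('Job handle: ' || h1);
 dbms_datapump.detach(h1); -- release the handle (this sketch never starts the job)
END;
/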
dbms_datapump.setup_remote_context(
user_name IN VARCHAR2,
job_name IN VARCHAR2,
version IN NUMBER,
status_xml IN VARCHAR2,
status_xml_len IN NUMBER,
more IN NUMBER);
dbms_datapump.start_job (
handle IN NUMBER,
skip_current IN NUMBER DEFAULT 0,
abort_step IN NUMBER DEFAULT 0,
cluster_ok IN NUMBER DEFAULT 1,
service_name IN VARCHAR2 DEFAULT NULL);
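A minimal sketch pairing start_job with wait_for_job to block until the job finishes; h1 and job_state are assumed to be declared as in the demo below:
dbms_datapump.start_job(handle => h1);
dbms_datapump.wait_for_job(handle => h1, job_state => job_state);
dbms_output.put_line('Final state: ' || job_state);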
set linesize 121
col owner format a20
col directory_name format a15
col directory_path format a60
SELECT *
FROM dba_directories;
HOST mkdir c:\expimp
HOST mkdir c:\expimp\logs
EXIT
CREATE OR REPLACE DIRECTORY expimp AS 'c:\expimp';
CREATE OR REPLACE DIRECTORY expimp_log AS 'c:\expimp\logs';
GRANT READ,WRITE ON DIRECTORY expimp TO system;
GRANT READ,WRITE ON DIRECTORY expimp_log TO system;
HOST del c:\expimp\*.dmp
HOST del c:\expimp\logs\*.log
set serveroutput on
DECLARE
ind NUMBER; -- loop index
dph NUMBER; -- job handle
pct_done NUMBER; -- percentage complete
job_state VARCHAR2(30); -- track job state
le ku$_LogEntry; -- WIP and error messages
js ku$_JobStatus; -- job status from get_status
jd ku$_JobDesc; -- job description from get_status
sts ku$_Status; -- status object returned by get_status
BEGIN
-- create job
dph := dbms_datapump.open('EXPORT','SCHEMA',NULL,'EXAMPLE5','LATEST');
-- add a dump file to the job (file name is an example; EXPIMP is the directory created above)
dbms_datapump.add_file(dph, 'example5.dmp', 'EXPIMP');
-- restrict the export to a single schema (schema name is an example)
dbms_datapump.metadata_filter(dph, 'SCHEMA_EXPR', 'IN (''UWCLASS'')');
-- start the job
dbms_datapump.start_job(dph);
-- monitor the job until it completes or is stopped
pct_done := 0;
job_state := 'UNDEFINED';
WHILE (job_state != 'COMPLETED') AND (job_state != 'STOPPED') LOOP
dbms_datapump.get_status(dph,
dbms_datapump.ku$_status_job_error + dbms_datapump.ku$_status_job_status +
dbms_datapump.ku$_status_wip, -1, job_state, sts);
js := sts.job_status;
-- if the percentage done changed, display the new value
IF js.percent_done != pct_done THEN
dbms_output.put_line('*** Job percent done = ' || TO_CHAR(js.percent_done));
pct_done := js.percent_done;
END IF;
-- display any work-in-progress (WIP) or error messages received from the job
IF (BITAND(sts.mask, dbms_datapump.ku$_status_wip) != 0) THEN
le := sts.wip;
ELSE
IF (BITAND(sts.mask, dbms_datapump.ku$_status_job_error) != 0) THEN
le := sts.error;
ELSE
le := NULL;
END IF;
END IF;
IF le IS NOT NULL THEN
ind := le.FIRST;
WHILE ind IS NOT NULL LOOP
dbms_output.put_line(le(ind).LogText);
ind := le.NEXT(ind);
END LOOP;
END IF;
END LOOP;
-- indicate that the job finished and detach from it
dbms_output.put_line('Job has completed');
dbms_output.put_line('Final job state = ' || job_state);
dbms_datapump.detach(dph);
EXCEPTION
WHEN OTHERS THEN
dbms_datapump.stop_job(dph);
END;
/